diff --git a/packages/cli/src/evaluation.ee/test-runner/test-runner.service.ee.ts b/packages/cli/src/evaluation.ee/test-runner/test-runner.service.ee.ts index 2d7cd6b58f..836911fd0e 100644 --- a/packages/cli/src/evaluation.ee/test-runner/test-runner.service.ee.ts +++ b/packages/cli/src/evaluation.ee/test-runner/test-runner.service.ee.ts @@ -15,6 +15,7 @@ import type { IExecuteData, INodeExecutionData, AssignmentCollectionValue, + GenericValue, } from 'n8n-workflow'; import assert from 'node:assert'; @@ -386,6 +387,20 @@ export class TestRunnerService { const testRun = await this.testRunRepository.createTestRun(workflowId); assert(testRun, 'Unable to create a test run'); + // Initialize telemetry metadata + const telemetryMeta = { + workflow_id: workflowId, + test_type: 'evaluation', + run_id: testRun.id, + start: Date.now(), + status: 'success' as 'success' | 'fail' | 'cancelled', + test_case_count: 0, + errored_test_case_count: 0, + metric_count: 0, + error_message: '', + duration: 0, + }; + // 0.1 Initialize AbortController const abortController = new AbortController(); this.abortControllers.set(testRun.id, abortController); @@ -397,8 +412,6 @@ export class TestRunnerService { userId: user.id, }; - let testRunEndStatusForTelemetry; - const abortSignal = abortController.signal; const { manager: dbManager } = this.testRunRepository; @@ -428,6 +441,7 @@ export class TestRunnerService { ); const testCases = datasetTriggerOutput.map((items) => ({ json: items.json })); + telemetryMeta.test_case_count = testCases.length; this.logger.debug('Found test cases', { count: testCases.length }); @@ -440,6 +454,7 @@ export class TestRunnerService { for (const testCase of testCases) { if (abortSignal.aborted) { + telemetryMeta.status = 'cancelled'; this.logger.debug('Test run was cancelled', { workflowId, }); @@ -484,6 +499,7 @@ export class TestRunnerService { errorCode: 'FAILED_TO_EXECUTE_WORKFLOW', metrics: {}, }); + telemetryMeta.errored_test_case_count++; continue; } const 
completedAt = new Date(); @@ -503,6 +519,7 @@ export class TestRunnerService { status: 'error', errorCode: 'NO_METRICS_COLLECTED', }); + telemetryMeta.errored_test_case_count++; } else { this.logger.debug('Test case metrics extracted', addedMetrics); // Create a new test case execution in DB @@ -526,6 +543,8 @@ export class TestRunnerService { error: e, }); + telemetryMeta.errored_test_case_count++; + // In case of an unexpected error save it as failed test case execution and continue with the next test case if (e instanceof TestCaseExecutionError) { await this.testCaseExecutionRepository.createTestCaseExecution({ @@ -560,21 +579,21 @@ export class TestRunnerService { await dbManager.transaction(async (trx) => { await this.testRunRepository.markAsCancelled(testRun.id, trx); await this.testCaseExecutionRepository.markAllPendingAsCancelled(testRun.id, trx); - - testRunEndStatusForTelemetry = 'cancelled'; }); + telemetryMeta.status = 'cancelled'; } else { const aggregatedMetrics = metrics.getAggregatedMetrics(); + telemetryMeta.metric_count = Object.keys(aggregatedMetrics).length; this.logger.debug('Aggregated metrics', aggregatedMetrics); await this.testRunRepository.markAsCompleted(testRun.id, aggregatedMetrics); this.logger.debug('Test run finished', { workflowId, testRunId: testRun.id }); - - testRunEndStatusForTelemetry = 'completed'; } } catch (e) { + telemetryMeta.status = 'fail'; + if (e instanceof ExecutionCancelledError) { this.logger.debug('Evaluation execution was cancelled. 
Cancelling test run', { testRunId: testRun.id, @@ -586,25 +605,43 @@ export class TestRunnerService { await this.testCaseExecutionRepository.markAllPendingAsCancelled(testRun.id, trx); }); - testRunEndStatusForTelemetry = 'cancelled'; + telemetryMeta.status = 'cancelled'; } else if (e instanceof TestRunError) { await this.testRunRepository.markAsError(testRun.id, e.code, e.extra as IDataObject); - testRunEndStatusForTelemetry = 'error'; + telemetryMeta.error_message = e.code; + if (e.extra && typeof e.extra === 'object' && 'message' in e.extra) { + telemetryMeta.error_message += `: ${String(e.extra.message)}`; + } } else { await this.testRunRepository.markAsError(testRun.id, 'UNKNOWN_ERROR'); - testRunEndStatusForTelemetry = 'error'; + telemetryMeta.error_message = e instanceof Error ? e.message : 'UNKNOWN_ERROR'; throw e; } } finally { + // Calculate duration + telemetryMeta.duration = Date.now() - telemetryMeta.start; + // Clean up abort controller this.abortControllers.delete(testRun.id); - // Send telemetry event - this.telemetry.track('Test run finished', { - workflow_id: workflowId, - run_id: testRun.id, - status: testRunEndStatusForTelemetry, - }); + // Send telemetry event with complete metadata + const telemetryPayload: Record<string, GenericValue> = { + ...telemetryMeta, + }; + + // Add success-specific fields + if (telemetryMeta.status === 'success') { + telemetryPayload.test_case_count = telemetryMeta.test_case_count; + telemetryPayload.errored_test_case_count = telemetryMeta.errored_test_case_count; + telemetryPayload.metric_count = telemetryMeta.metric_count; + } + + // Add fail-specific fields + if (telemetryMeta.status === 'fail') { + telemetryPayload.error_message = telemetryMeta.error_message; + } + + this.telemetry.track('Test run finished', telemetryPayload); } } diff --git a/packages/frontend/editor-ui/src/views/Evaluations.ee/EvaluationsRootView.vue b/packages/frontend/editor-ui/src/views/Evaluations.ee/EvaluationsRootView.vue index 40158f0351..33e62ba06b 100644 
--- a/packages/frontend/editor-ui/src/views/Evaluations.ee/EvaluationsRootView.vue +++ b/packages/frontend/editor-ui/src/views/Evaluations.ee/EvaluationsRootView.vue @@ -4,12 +4,14 @@ import { useUsageStore } from '@/stores/usage.store'; import { useAsyncState } from '@vueuse/core'; import { PLACEHOLDER_EMPTY_WORKFLOW_ID } from '@/constants'; import { useCanvasOperations } from '@/composables/useCanvasOperations'; +import { useTelemetry } from '@/composables/useTelemetry'; import { useToast } from '@/composables/useToast'; import { useI18n } from '@/composables/useI18n'; import { useRouter } from 'vue-router'; -import { useNodeTypesStore } from '@/stores/nodeTypes.store'; import { useEvaluationStore } from '@/stores/evaluation.store.ee'; -import { computed } from 'vue'; +import { useNodeTypesStore } from '@/stores/nodeTypes.store'; + +import { computed, watch } from 'vue'; import { N8nLink, N8nText } from '@n8n/design-system'; import EvaluationsPaywall from '@/components/Evaluations.ee/Paywall/EvaluationsPaywall.vue'; import SetupWizard from '@/components/Evaluations.ee/SetupWizard/SetupWizard.vue'; @@ -21,10 +23,11 @@ const props = defineProps<{ const workflowsStore = useWorkflowsStore(); const usageStore = useUsageStore(); const evaluationStore = useEvaluationStore(); +const nodeTypesStore = useNodeTypesStore(); +const telemetry = useTelemetry(); const router = useRouter(); const toast = useToast(); const locale = useI18n(); -const nodeTypesStore = useNodeTypesStore(); const { initializeWorkspace } = useCanvasOperations({ router }); @@ -32,13 +35,18 @@ const evaluationsLicensed = computed(() => { return usageStore.workflowsWithEvaluationsLimit !== 0; }); -const showWizard = computed(() => { - const runs = Object.values(evaluationStore.testRunsById ?? {}).filter( +const runs = computed(() => { + return Object.values(evaluationStore.testRunsById ?? 
{}).filter( ({ workflowId }) => workflowId === props.name, ); - return runs.length === 0; }); +const hasRuns = computed(() => { + return runs.value.length > 0; +}); + +const showWizard = computed(() => !hasRuns.value); + // Method to run a test - will be used by the SetupWizard component async function runTest() { try { @@ -54,6 +62,14 @@ async function runTest() { } } +const evaluationsQuotaExceeded = computed(() => { + return ( + usageStore.workflowsWithEvaluationsLimit !== -1 && + usageStore.workflowsWithEvaluationsCount >= usageStore.workflowsWithEvaluationsLimit && + !hasRuns.value + ); +}); + const { isReady } = useAsyncState(async () => { try { await usageStore.getLicenseInfo(); @@ -83,6 +99,33 @@ const { isReady } = useAsyncState(async () => { } } }, undefined); + +watch( + isReady, + (ready) => { + if (ready) { + if (showWizard.value) { + telemetry.track('User viewed tests tab', { + workflow_id: props.name, + test_type: 'evaluation', + view: 'setup', + trigger_set_up: evaluationStore.evaluationTriggerExists, + output_set_up: evaluationStore.evaluationSetOutputsNodeExist, + metrics_set_up: evaluationStore.evaluationSetMetricsNodeExist, + quota_reached: evaluationsQuotaExceeded.value, + }); + } else { + telemetry.track('User viewed tests tab', { + workflow_id: props.name, + test_type: 'evaluation', + view: 'overview', + run_count: runs.value.length, + }); + } + } + }, + { immediate: true }, +);