Mirror of https://github.com/Abdulazizzn/n8n-enterprise-unlocked.git (synced 2025-12-17 10:02:05 +00:00)
feat(core): Add telemetry for Evaluation feature (no-changelog) (#15562)
Co-authored-by: Yiorgis Gozadinos <yiorgis@n8n.io>
@@ -15,6 +15,7 @@ import type {
 	IExecuteData,
 	INodeExecutionData,
 	AssignmentCollectionValue,
+	GenericValue,
 } from 'n8n-workflow';
 import assert from 'node:assert';

@@ -386,6 +387,20 @@ export class TestRunnerService {
 		const testRun = await this.testRunRepository.createTestRun(workflowId);
 		assert(testRun, 'Unable to create a test run');

+		// Initialize telemetry metadata
+		const telemetryMeta = {
+			workflow_id: workflowId,
+			test_type: 'evaluation',
+			run_id: testRun.id,
+			start: Date.now(),
+			status: 'success' as 'success' | 'fail' | 'cancelled',
+			test_case_count: 0,
+			errored_test_case_count: 0,
+			metric_count: 0,
+			error_message: '',
+			duration: 0,
+		};
+
 		// 0.1 Initialize AbortController
 		const abortController = new AbortController();
 		this.abortControllers.set(testRun.id, abortController);
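For reference, the metadata object introduced above keeps one stable shape for the entire run. A minimal standalone sketch of that shape (the TelemetryMeta name is hypothetical and does not appear in the diff; the fields mirror the object literal above):

type TelemetryMeta = {
	workflow_id: string;
	test_type: 'evaluation';
	run_id: string;
	start: number; // epoch milliseconds, captured once at run start
	status: 'success' | 'fail' | 'cancelled'; // starts as 'success', overwritten on cancellation or failure
	test_case_count: number;
	errored_test_case_count: number;
	metric_count: number;
	error_message: string;
	duration: number; // filled in the finally block as Date.now() - start
};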
@@ -397,8 +412,6 @@
 			userId: user.id,
 		};

-		let testRunEndStatusForTelemetry;
-
 		const abortSignal = abortController.signal;
 		const { manager: dbManager } = this.testRunRepository;

@@ -428,6 +441,7 @@
 			);

 			const testCases = datasetTriggerOutput.map((items) => ({ json: items.json }));
+			telemetryMeta.test_case_count = testCases.length;

 			this.logger.debug('Found test cases', { count: testCases.length });

@@ -440,6 +454,7 @@

 			for (const testCase of testCases) {
 				if (abortSignal.aborted) {
+					telemetryMeta.status = 'cancelled';
 					this.logger.debug('Test run was cancelled', {
 						workflowId,
 					});
@@ -484,6 +499,7 @@
 						errorCode: 'FAILED_TO_EXECUTE_WORKFLOW',
 						metrics: {},
 					});
+					telemetryMeta.errored_test_case_count++;
 					continue;
 				}
 				const completedAt = new Date();
@@ -503,6 +519,7 @@
 						status: 'error',
 						errorCode: 'NO_METRICS_COLLECTED',
 					});
+					telemetryMeta.errored_test_case_count++;
 				} else {
 					this.logger.debug('Test case metrics extracted', addedMetrics);
 					// Create a new test case execution in DB
@@ -526,6 +543,8 @@
 						error: e,
 					});

+					telemetryMeta.errored_test_case_count++;
+
 					// In case of an unexpected error save it as failed test case execution and continue with the next test case
 					if (e instanceof TestCaseExecutionError) {
 						await this.testCaseExecutionRepository.createTestCaseExecution({
@@ -560,21 +579,21 @@
 				await dbManager.transaction(async (trx) => {
 					await this.testRunRepository.markAsCancelled(testRun.id, trx);
 					await this.testCaseExecutionRepository.markAllPendingAsCancelled(testRun.id, trx);
-
-					testRunEndStatusForTelemetry = 'cancelled';
 				});
+				telemetryMeta.status = 'cancelled';
 			} else {
 				const aggregatedMetrics = metrics.getAggregatedMetrics();
+				telemetryMeta.metric_count = Object.keys(aggregatedMetrics).length;

 				this.logger.debug('Aggregated metrics', aggregatedMetrics);

 				await this.testRunRepository.markAsCompleted(testRun.id, aggregatedMetrics);

 				this.logger.debug('Test run finished', { workflowId, testRunId: testRun.id });
-
-				testRunEndStatusForTelemetry = 'completed';
 			}
 		} catch (e) {
+			telemetryMeta.status = 'fail';
+
 			if (e instanceof ExecutionCancelledError) {
 				this.logger.debug('Evaluation execution was cancelled. Cancelling test run', {
 					testRunId: testRun.id,
@@ -586,25 +605,43 @@
 					await this.testCaseExecutionRepository.markAllPendingAsCancelled(testRun.id, trx);
 				});

-				testRunEndStatusForTelemetry = 'cancelled';
+				telemetryMeta.status = 'cancelled';
 			} else if (e instanceof TestRunError) {
 				await this.testRunRepository.markAsError(testRun.id, e.code, e.extra as IDataObject);
-				testRunEndStatusForTelemetry = 'error';
+				telemetryMeta.error_message = e.code;
+				if (e.extra && typeof e.extra === 'object' && 'message' in e.extra) {
+					telemetryMeta.error_message += `: ${String(e.extra.message)}`;
+				}
 			} else {
 				await this.testRunRepository.markAsError(testRun.id, 'UNKNOWN_ERROR');
-				testRunEndStatusForTelemetry = 'error';
+				telemetryMeta.error_message = e instanceof Error ? e.message : 'UNKNOWN_ERROR';
 				throw e;
 			}
 		} finally {
+			// Calculate duration
+			telemetryMeta.duration = Date.now() - telemetryMeta.start;
+
 			// Clean up abort controller
 			this.abortControllers.delete(testRun.id);

-			// Send telemetry event
-			this.telemetry.track('Test run finished', {
-				workflow_id: workflowId,
-				run_id: testRun.id,
-				status: testRunEndStatusForTelemetry,
-			});
+			// Send telemetry event with complete metadata
+			const telemetryPayload: Record<string, GenericValue> = {
+				...telemetryMeta,
+			};
+
+			// Add success-specific fields
+			if (telemetryMeta.status === 'success') {
+				telemetryPayload.test_case_count = telemetryMeta.test_case_count;
+				telemetryPayload.errored_test_case_count = telemetryMeta.errored_test_case_count;
+				telemetryPayload.metric_count = telemetryMeta.metric_count;
+			}
+
+			// Add fail-specific fields
+			if (telemetryMeta.status === 'fail') {
+				telemetryPayload.error_message = telemetryMeta.error_message;
+			}
+
+			this.telemetry.track('Test run finished', telemetryPayload);
 		}
 	}

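That concludes the TestRunnerService changes. The finally block runs on every code path, so 'Test run finished' fires exactly once per run. As a minimal sketch of the payload assembly (reusing the hypothetical TelemetryMeta type sketched earlier; note that the spread already copies every field, so the status-specific assignments re-assert values that are already present):

function buildTelemetryPayload(meta: TelemetryMeta): Record<string, unknown> {
	// Start from the full accumulated metadata.
	const payload: Record<string, unknown> = { ...meta };
	// Successful runs re-assert their counters, mirroring the diff above.
	if (meta.status === 'success') {
		payload.test_case_count = meta.test_case_count;
		payload.errored_test_case_count = meta.errored_test_case_count;
		payload.metric_count = meta.metric_count;
	}
	// Failed runs re-assert the collected error message.
	if (meta.status === 'fail') {
		payload.error_message = meta.error_message;
	}
	return payload;
}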
@@ -4,12 +4,14 @@ import { useUsageStore } from '@/stores/usage.store';
 import { useAsyncState } from '@vueuse/core';
 import { PLACEHOLDER_EMPTY_WORKFLOW_ID } from '@/constants';
 import { useCanvasOperations } from '@/composables/useCanvasOperations';
+import { useTelemetry } from '@/composables/useTelemetry';
 import { useToast } from '@/composables/useToast';
 import { useI18n } from '@/composables/useI18n';
 import { useRouter } from 'vue-router';
-import { useNodeTypesStore } from '@/stores/nodeTypes.store';
 import { useEvaluationStore } from '@/stores/evaluation.store.ee';
-import { computed } from 'vue';
+import { useNodeTypesStore } from '@/stores/nodeTypes.store';
+
+import { computed, watch } from 'vue';
 import { N8nLink, N8nText } from '@n8n/design-system';
 import EvaluationsPaywall from '@/components/Evaluations.ee/Paywall/EvaluationsPaywall.vue';
 import SetupWizard from '@/components/Evaluations.ee/SetupWizard/SetupWizard.vue';
@@ -21,10 +23,11 @@ const props = defineProps<{
 const workflowsStore = useWorkflowsStore();
 const usageStore = useUsageStore();
 const evaluationStore = useEvaluationStore();
+const nodeTypesStore = useNodeTypesStore();
+const telemetry = useTelemetry();
 const router = useRouter();
 const toast = useToast();
 const locale = useI18n();
-const nodeTypesStore = useNodeTypesStore();

 const { initializeWorkspace } = useCanvasOperations({ router });
@@ -32,13 +35,18 @@ const evaluationsLicensed = computed(() => {
 	return usageStore.workflowsWithEvaluationsLimit !== 0;
 });

-const showWizard = computed(() => {
-	const runs = Object.values(evaluationStore.testRunsById ?? {}).filter(
+const runs = computed(() => {
+	return Object.values(evaluationStore.testRunsById ?? {}).filter(
 		({ workflowId }) => workflowId === props.name,
 	);
-	return runs.length === 0;
 });

+const hasRuns = computed(() => {
+	return runs.value.length > 0;
+});
+
+const showWizard = computed(() => !hasRuns.value);
+
 // Method to run a test - will be used by the SetupWizard component
 async function runTest() {
 	try {
@@ -54,6 +62,14 @@ async function runTest() {
 	}
 }

+const evaluationsQuotaExceeded = computed(() => {
+	return (
+		usageStore.workflowsWithEvaluationsLimit !== -1 &&
+		usageStore.workflowsWithEvaluationsCount >= usageStore.workflowsWithEvaluationsLimit &&
+		!hasRuns.value
+	);
+});
+
 const { isReady } = useAsyncState(async () => {
 	try {
 		await usageStore.getLicenseInfo();
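A minimal sketch of that check as a pure function (assuming, as the code implies, that a limit of -1 means unlimited; the helper name is hypothetical):

function isEvaluationsQuotaExceeded(limit: number, count: number, hasRuns: boolean): boolean {
	// Unlimited plans (-1) never hit the quota, and workflows that
	// already have test runs are exempt from the check.
	return limit !== -1 && count >= limit && !hasRuns;
}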
@@ -83,6 +99,33 @@ const { isReady } = useAsyncState(async () => {
 		}
 	}
 }, undefined);
+
+watch(
+	isReady,
+	(ready) => {
+		if (ready) {
+			if (showWizard.value) {
+				telemetry.track('User viewed tests tab', {
+					workflow_id: props.name,
+					test_type: 'evaluation',
+					view: 'setup',
+					trigger_set_up: evaluationStore.evaluationTriggerExists,
+					output_set_up: evaluationStore.evaluationSetOutputsNodeExist,
+					metrics_set_up: evaluationStore.evaluationSetMetricsNodeExist,
+					quota_reached: evaluationsQuotaExceeded.value,
+				});
+			} else {
+				telemetry.track('User viewed tests tab', {
+					workflow_id: props.name,
+					test_type: 'evaluation',
+					view: 'overview',
+					run_count: runs.value.length,
+				});
+			}
+		}
+	},
+	{ immediate: true },
+);
 </script>

 <template>
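The watcher above runs as soon as isReady becomes true ({ immediate: true } covers the case where it already is) and emits one of two payload shapes. A hypothetical union type summarizing them (TestsTabViewedEvent is not a name from the diff):

type TestsTabViewedEvent =
	| {
			workflow_id: string;
			test_type: 'evaluation';
			view: 'setup';
			trigger_set_up: boolean;
			output_set_up: boolean;
			metrics_set_up: boolean;
			quota_reached: boolean;
	  }
	| {
			workflow_id: string;
			test_type: 'evaluation';
			view: 'overview';
			run_count: number;
	  };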
@@ -1,4 +1,4 @@
-import { describe, it, expect, beforeEach } from 'vitest';
+import { describe, it, expect, beforeEach, vi } from 'vitest';
 import { mock } from 'vitest-mock-extended';
 import { createTestingPinia } from '@pinia/testing';
 import { createComponentRenderer } from '@/__tests__/render';
@@ -6,11 +6,30 @@ import EvaluationRootView from '../EvaluationsRootView.vue';

 import { useWorkflowsStore } from '@/stores/workflows.store';
 import { useEvaluationStore } from '@/stores/evaluation.store.ee';
+import { useUsageStore } from '@/stores/usage.store';
 import { mockedStore } from '@/__tests__/utils';
 import type { IWorkflowDb } from '@/Interface';
 import { waitFor } from '@testing-library/vue';
 import type { TestRunRecord } from '@/api/evaluation.ee';
 import { PLACEHOLDER_EMPTY_WORKFLOW_ID } from '@/constants';
+import { useTelemetry } from '@/composables/useTelemetry';
+import { EVALUATION_NODE_TYPE, EVALUATION_TRIGGER_NODE_TYPE, NodeHelpers } from 'n8n-workflow';
+
+vi.mock('@/composables/useTelemetry', () => {
+	const track = vi.fn();
+	return {
+		useTelemetry: () => ({
+			track,
+		}),
+	};
+});
+
+const getNodeType = vi.fn();
+vi.mock('@/stores/nodeTypes.store', () => ({
+	useNodeTypesStore: vi.fn(() => ({
+		getNodeType,
+	})),
+}));
+
 describe('EvaluationsRootView', () => {
 	const renderComponent = createComponentRenderer(EvaluationRootView);
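A note on the mock shape: vitest hoists vi.mock factories above the imports, so the shared track spy has to be created inside the factory. Every useTelemetry() call then hands back the same spy, which is what lets the tests below assert on useTelemetry().track. A minimal sketch of that property:

// Both calls build a fresh object, but the track spy inside is shared.
expect(useTelemetry().track).toBe(useTelemetry().track);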
@@ -37,6 +56,21 @@ describe('EvaluationsRootView', () => {

 	beforeEach(() => {
 		createTestingPinia();
+		vi.clearAllMocks();
+
+		vi.spyOn(NodeHelpers, 'getNodeParameters').mockReturnValue({
+			assignments: {
+				assignments: [
+					{
+						id: 'xxxxx',
+						name: '=',
+						value: '',
+						type: 'string',
+					},
+				],
+			},
+			options: {},
+		});
 	});

 	it('should initialize workflow on mount if not already initialized', async () => {
@@ -91,4 +125,231 @@ describe('EvaluationsRootView', () => {

 		await waitFor(() => expect(container.querySelector('.setupContent')).toBeTruthy());
 	});
+
+	describe('telemetry', () => {
+		it('should send telemetry event on mount with setup view when no test runs exist', async () => {
+			const workflowsStore = mockedStore(useWorkflowsStore);
+			const evaluationStore = mockedStore(useEvaluationStore);
+			const usageStore = mockedStore(useUsageStore);
+
+			workflowsStore.workflow = mockWorkflow;
+			evaluationStore.testRunsById = {};
+			usageStore.workflowsWithEvaluationsLimit = 10;
+			usageStore.workflowsWithEvaluationsCount = 0;
+
+			// Mock no evaluation nodes in workflow
+			getNodeType.mockReturnValue(null);
+
+			renderComponent({ props: { name: mockWorkflow.id } });
+
+			await waitFor(() => {
+				expect(useTelemetry().track).toHaveBeenCalledWith('User viewed tests tab', {
+					workflow_id: mockWorkflow.id,
+					test_type: 'evaluation',
+					view: 'setup',
+					trigger_set_up: false,
+					output_set_up: false,
+					metrics_set_up: false,
+					quota_reached: false,
+				});
+			});
+		});
+
+		it('should send telemetry event on mount with overview view when test runs exist', async () => {
+			const workflowsStore = mockedStore(useWorkflowsStore);
+			const evaluationStore = mockedStore(useEvaluationStore);
+			const usageStore = mockedStore(useUsageStore);
+
+			workflowsStore.workflow = mockWorkflow;
+			evaluationStore.testRunsById = {
+				run1: mock<TestRunRecord>({ workflowId: mockWorkflow.id }),
+				run2: mock<TestRunRecord>({ workflowId: mockWorkflow.id }),
+			};
+			usageStore.workflowsWithEvaluationsLimit = 10;
+			usageStore.workflowsWithEvaluationsCount = 1;
+
+			renderComponent({ props: { name: mockWorkflow.id } });
+
+			await waitFor(() => {
+				expect(useTelemetry().track).toHaveBeenCalledWith('User viewed tests tab', {
+					workflow_id: mockWorkflow.id,
+					test_type: 'evaluation',
+					view: 'overview',
+					run_count: 2,
+				});
+			});
+		});
+
+		it('should send telemetry event with trigger_set_up true when dataset trigger node exists', async () => {
+			const workflowsStore = mockedStore(useWorkflowsStore);
+			const evaluationStore = mockedStore(useEvaluationStore);
+			const usageStore = mockedStore(useUsageStore);
+
+			const workflowWithTrigger = mock<IWorkflowDb>({
+				...mockWorkflow,
+				nodes: [
+					{
+						id: 'trigger1',
+						name: 'Dataset Trigger',
+						type: EVALUATION_TRIGGER_NODE_TYPE,
+						typeVersion: 1,
+						position: [0, 0],
+						parameters: {},
+					},
+				],
+			});
+
+			workflowsStore.workflow = workflowWithTrigger;
+			evaluationStore.testRunsById = {};
+			usageStore.workflowsWithEvaluationsLimit = 10;
+			usageStore.workflowsWithEvaluationsCount = 0;
+
+			// Mock dataset trigger node type exists
+			getNodeType.mockImplementation((nodeType) =>
+				nodeType === EVALUATION_TRIGGER_NODE_TYPE ? { name: EVALUATION_TRIGGER_NODE_TYPE } : null,
+			);
+
+			renderComponent({ props: { name: mockWorkflow.id } });
+
+			await waitFor(() => {
+				expect(useTelemetry().track).toHaveBeenCalledWith('User viewed tests tab', {
+					workflow_id: mockWorkflow.id,
+					test_type: 'evaluation',
+					view: 'setup',
+					trigger_set_up: true,
+					output_set_up: false,
+					metrics_set_up: false,
+					quota_reached: false,
+				});
+			});
+		});
+
+		it('should send telemetry event with output_set_up true when evaluation set output node exists', async () => {
+			const workflowsStore = mockedStore(useWorkflowsStore);
+			const evaluationStore = mockedStore(useEvaluationStore);
+			const usageStore = mockedStore(useUsageStore);
+
+			const workflowWithOutputNode = mock<IWorkflowDb>({
+				...mockWorkflow,
+				nodes: [
+					{
+						id: 'output1',
+						name: 'Set Outputs',
+						type: EVALUATION_NODE_TYPE,
+						typeVersion: 1,
+						position: [0, 0],
+						parameters: {
+							operation: 'setOutputs',
+						},
+					},
+				],
+			});
+
+			vi.spyOn(NodeHelpers, 'getNodeParameters').mockReturnValue({
+				operation: 'setOutputs',
+			});
+
+			workflowsStore.workflow = workflowWithOutputNode;
+			evaluationStore.testRunsById = {};
+			usageStore.workflowsWithEvaluationsLimit = 10;
+			usageStore.workflowsWithEvaluationsCount = 0;
+
+			// Mock evaluation node type exists
+			getNodeType.mockImplementation((nodeType) =>
+				nodeType === EVALUATION_NODE_TYPE ? { name: EVALUATION_NODE_TYPE } : null,
+			);
+
+			renderComponent({ props: { name: mockWorkflow.id } });
+
+			await waitFor(() => {
+				expect(useTelemetry().track).toHaveBeenCalledWith('User viewed tests tab', {
+					workflow_id: mockWorkflow.id,
+					test_type: 'evaluation',
+					view: 'setup',
+					trigger_set_up: false,
+					output_set_up: true,
+					metrics_set_up: false,
+					quota_reached: false,
+				});
+			});
+		});
+
+		it('should send telemetry event with metrics_set_up true when evaluation metrics node exists', async () => {
+			const workflowsStore = mockedStore(useWorkflowsStore);
+			const evaluationStore = mockedStore(useEvaluationStore);
+			const usageStore = mockedStore(useUsageStore);
+
+			const workflowWithMetricsNode = mock<IWorkflowDb>({
+				...mockWorkflow,
+				nodes: [
+					{
+						id: 'metrics1',
+						name: 'Set Metrics',
+						type: EVALUATION_NODE_TYPE,
+						typeVersion: 1,
+						position: [0, 0],
+						parameters: {
+							operation: 'setMetrics',
+						},
+					},
+				],
+			});
+
+			vi.spyOn(NodeHelpers, 'getNodeParameters').mockReturnValue({
+				operation: 'setMetrics',
+			});
+
+			workflowsStore.workflow = workflowWithMetricsNode;
+			evaluationStore.testRunsById = {};
+			usageStore.workflowsWithEvaluationsLimit = 10;
+			usageStore.workflowsWithEvaluationsCount = 0;
+
+			// Mock evaluation node type exists
+			getNodeType.mockImplementation((nodeType) =>
+				nodeType === EVALUATION_NODE_TYPE ? { name: EVALUATION_NODE_TYPE } : null,
+			);
+
+			renderComponent({ props: { name: mockWorkflow.id } });
+
+			await waitFor(() => {
+				expect(useTelemetry().track).toHaveBeenCalledWith('User viewed tests tab', {
+					workflow_id: mockWorkflow.id,
+					test_type: 'evaluation',
+					view: 'setup',
+					trigger_set_up: false,
+					output_set_up: false,
+					metrics_set_up: true,
+					quota_reached: false,
+				});
+			});
+		});
+
+		it('should send telemetry event with quota_reached true when evaluations quota is exceeded', async () => {
+			const workflowsStore = mockedStore(useWorkflowsStore);
+			const evaluationStore = mockedStore(useEvaluationStore);
+			const usageStore = mockedStore(useUsageStore);
+
+			workflowsStore.workflow = mockWorkflow;
+			evaluationStore.testRunsById = {};
+			usageStore.workflowsWithEvaluationsLimit = 5;
+			usageStore.workflowsWithEvaluationsCount = 5; // At limit
+
+			// Mock no evaluation nodes in workflow
+			getNodeType.mockReturnValue(null);
+
+			renderComponent({ props: { name: mockWorkflow.id } });
+
+			await waitFor(() => {
+				expect(useTelemetry().track).toHaveBeenCalledWith('User viewed tests tab', {
+					workflow_id: mockWorkflow.id,
+					test_type: 'evaluation',
+					view: 'setup',
+					trigger_set_up: false,
+					output_set_up: false,
+					metrics_set_up: false,
+					quota_reached: true,
+				});
+			});
+		});
+	});
 });