diff --git a/packages/@n8n/api-types/src/frontend-settings.ts b/packages/@n8n/api-types/src/frontend-settings.ts index ab436703da..a3a87589af 100644 --- a/packages/@n8n/api-types/src/frontend-settings.ts +++ b/packages/@n8n/api-types/src/frontend-settings.ts @@ -195,4 +195,7 @@ export interface FrontendSettings { dashboard: boolean; dateRanges: InsightsDateRange[]; }; + evaluation: { + quota: number; + }; } diff --git a/packages/@n8n/backend-common/src/license-state.ts b/packages/@n8n/backend-common/src/license-state.ts index ad050763cf..b2ad56f844 100644 --- a/packages/@n8n/backend-common/src/license-state.ts +++ b/packages/@n8n/backend-common/src/license-state.ts @@ -189,4 +189,8 @@ export class LicenseState { getMaxTeamProjects() { return this.getValue('quota:maxTeamProjects') ?? 0; } + + getMaxWorkflowsWithEvaluations() { + return this.getValue('quota:evaluations:maxWorkflows') ?? 0; + } } diff --git a/packages/@n8n/constants/src/index.ts b/packages/@n8n/constants/src/index.ts index 0fad17020f..d9ed5b2b02 100644 --- a/packages/@n8n/constants/src/index.ts +++ b/packages/@n8n/constants/src/index.ts @@ -39,6 +39,7 @@ export const LICENSE_QUOTAS = { INSIGHTS_MAX_HISTORY_DAYS: 'quota:insights:maxHistoryDays', INSIGHTS_RETENTION_MAX_AGE_DAYS: 'quota:insights:retention:maxAgeDays', INSIGHTS_RETENTION_PRUNE_INTERVAL_DAYS: 'quota:insights:retention:pruneIntervalDays', + WORKFLOWS_WITH_EVALUATION_LIMIT: 'quota:evaluations:maxWorkflows', } as const; export const UNLIMITED_LICENSE_QUOTA = -1; diff --git a/packages/@n8n/db/src/entities/types-db.ts b/packages/@n8n/db/src/entities/types-db.ts index b93c018733..b8533e9668 100644 --- a/packages/@n8n/db/src/entities/types-db.ts +++ b/packages/@n8n/db/src/entities/types-db.ts @@ -286,9 +286,17 @@ export type TestRunErrorCode = | 'TEST_CASES_NOT_FOUND' | 'INTERRUPTED' | 'UNKNOWN_ERROR' - | 'EVALUATION_TRIGGER_NOT_FOUND'; + | 'EVALUATION_TRIGGER_NOT_FOUND' + | 'EVALUATION_TRIGGER_NOT_CONFIGURED' + | 'EVALUATION_TRIGGER_DISABLED' + 
| 'SET_OUTPUTS_NODE_NOT_FOUND' + | 'SET_OUTPUTS_NODE_NOT_CONFIGURED' + | 'SET_METRICS_NODE_NOT_FOUND' + | 'SET_METRICS_NODE_NOT_CONFIGURED' + | 'CANT_FETCH_TEST_CASES'; export type TestCaseExecutionErrorCode = + | 'NO_METRICS_COLLECTED' | 'MOCKED_NODE_NOT_FOUND' // This will be used when node mocking will be implemented | 'FAILED_TO_EXECUTE_WORKFLOW' | 'INVALID_METRICS' diff --git a/packages/@n8n/db/src/repositories/license-metrics.repository.ts b/packages/@n8n/db/src/repositories/license-metrics.repository.ts index e9c056c3d4..c20c0c1091 100644 --- a/packages/@n8n/db/src/repositories/license-metrics.repository.ts +++ b/packages/@n8n/db/src/repositories/license-metrics.repository.ts @@ -33,12 +33,14 @@ export class LicenseMetricsRepository extends Repository { production_executions_count: string | number; production_root_executions_count: string | number; manual_executions_count: string | number; + evaluations_count: string | number; }; const userTable = this.toTableName('user'); const workflowTable = this.toTableName('workflow_entity'); const credentialTable = this.toTableName('credentials_entity'); const workflowStatsTable = this.toTableName('workflow_statistics'); + const testRunTable = this.toTableName('test_run'); const [ { @@ -50,6 +52,7 @@ export class LicenseMetricsRepository extends Repository { production_executions_count: productionExecutions, production_root_executions_count: productionRootExecutions, manual_executions_count: manualExecutions, + evaluations_count: evaluations, }, ] = (await this.query(` SELECT @@ -60,7 +63,8 @@ export class LicenseMetricsRepository extends Repository { (SELECT COUNT(*) FROM ${credentialTable}) AS total_credentials_count, (SELECT SUM(count) FROM ${workflowStatsTable} WHERE name IN ('production_success', 'production_error')) AS production_executions_count, (SELECT SUM(${this.toColumnName('rootCount')}) FROM ${workflowStatsTable} WHERE name IN ('production_success', 'production_error')) AS production_root_executions_count, 
- (SELECT SUM(count) FROM ${workflowStatsTable} WHERE name IN ('manual_success', 'manual_error')) AS manual_executions_count; + (SELECT SUM(count) FROM ${workflowStatsTable} WHERE name IN ('manual_success', 'manual_error')) AS manual_executions_count, + (SELECT COUNT(distinct ${this.toColumnName('workflowId')}) FROM ${testRunTable}) AS evaluations_count; `)) as Row[]; const toNumber = (value: string | number) => @@ -75,6 +79,7 @@ export class LicenseMetricsRepository extends Repository { productionExecutions: toNumber(productionExecutions), productionRootExecutions: toNumber(productionRootExecutions), manualExecutions: toNumber(manualExecutions), + evaluations: toNumber(evaluations), }; } } diff --git a/packages/@n8n/db/src/repositories/workflow.repository.ts b/packages/@n8n/db/src/repositories/workflow.repository.ts index ebaf12f826..500bbf2dbd 100644 --- a/packages/@n8n/db/src/repositories/workflow.repository.ts +++ b/packages/@n8n/db/src/repositories/workflow.repository.ts @@ -128,6 +128,16 @@ export class WorkflowRepository extends Repository { .execute(); } + async getWorkflowsWithEvaluationCount() { + // Count workflows having test runs + const totalWorkflowCount = await this.createQueryBuilder('workflow') + .innerJoin('workflow.testRuns', 'testrun') + .distinct(true) + .getCount(); + + return totalWorkflowCount ?? 
0; + } + private buildBaseUnionQuery(workflowIds: string[], options: ListQuery.Options = {}) { const subQueryParameters: ListQuery.Options = { select: { diff --git a/packages/cli/src/constants.ts b/packages/cli/src/constants.ts index 1d623f2480..944887091a 100644 --- a/packages/cli/src/constants.ts +++ b/packages/cli/src/constants.ts @@ -157,6 +157,3 @@ export const WsStatusCodes = { } as const; export const FREE_AI_CREDITS_CREDENTIAL_NAME = 'n8n free OpenAI API credits'; - -export const EVALUATION_NODE = `${NODE_PACKAGE_PREFIX}base.evaluation`; -export const EVALUATION_DATASET_TRIGGER_NODE = `${NODE_PACKAGE_PREFIX}base.evaluationTrigger`; diff --git a/packages/cli/src/controllers/e2e.controller.ts b/packages/cli/src/controllers/e2e.controller.ts index 72d0e3098c..6dbe37aaf0 100644 --- a/packages/cli/src/controllers/e2e.controller.ts +++ b/packages/cli/src/controllers/e2e.controller.ts @@ -117,6 +117,7 @@ export class E2EController { [LICENSE_QUOTAS.INSIGHTS_MAX_HISTORY_DAYS]: 7, [LICENSE_QUOTAS.INSIGHTS_RETENTION_MAX_AGE_DAYS]: 30, [LICENSE_QUOTAS.INSIGHTS_RETENTION_PRUNE_INTERVAL_DAYS]: 180, + [LICENSE_QUOTAS.WORKFLOWS_WITH_EVALUATION_LIMIT]: 1, }; private numericFeatures: Record = { @@ -137,6 +138,8 @@ export class E2EController { E2EController.numericFeaturesDefaults[LICENSE_QUOTAS.INSIGHTS_RETENTION_MAX_AGE_DAYS], [LICENSE_QUOTAS.INSIGHTS_RETENTION_PRUNE_INTERVAL_DAYS]: E2EController.numericFeaturesDefaults[LICENSE_QUOTAS.INSIGHTS_RETENTION_PRUNE_INTERVAL_DAYS], + [LICENSE_QUOTAS.WORKFLOWS_WITH_EVALUATION_LIMIT]: + E2EController.numericFeaturesDefaults[LICENSE_QUOTAS.WORKFLOWS_WITH_EVALUATION_LIMIT], }; constructor( diff --git a/packages/cli/src/evaluation.ee/test-runner/__tests__/test-runner.service.ee.test.ts b/packages/cli/src/evaluation.ee/test-runner/__tests__/test-runner.service.ee.test.ts index bf55ab7bc3..e8aeddcfbc 100644 --- a/packages/cli/src/evaluation.ee/test-runner/__tests__/test-runner.service.ee.test.ts +++ 
b/packages/cli/src/evaluation.ee/test-runner/__tests__/test-runner.service.ee.test.ts @@ -5,12 +5,12 @@ import type { WorkflowRepository } from '@n8n/db'; import { readFileSync } from 'fs'; import { mock } from 'jest-mock-extended'; import type { ErrorReporter } from 'n8n-core'; +import { EVALUATION_NODE_TYPE, EVALUATION_TRIGGER_NODE_TYPE } from 'n8n-workflow'; import type { IWorkflowBase } from 'n8n-workflow'; -import type { IRun } from 'n8n-workflow'; +import type { IRun, ExecutionError } from 'n8n-workflow'; import path from 'path'; import type { ActiveExecutions } from '@/active-executions'; -import { EVALUATION_DATASET_TRIGGER_NODE } from '@/constants'; import { TestRunError } from '@/evaluation.ee/test-runner/errors.ee'; import { LoadNodesAndCredentials } from '@/load-nodes-and-credentials'; import type { Telemetry } from '@/telemetry'; @@ -59,7 +59,7 @@ describe('TestRunnerService', () => { jest.resetAllMocks(); }); - describe('findTriggerNode', () => { + describe('findEvaluationTriggerNode', () => { test('should find the trigger node in a workflow', () => { // Setup a test workflow with a trigger node const workflowWithTrigger = mock({ @@ -67,7 +67,7 @@ describe('TestRunnerService', () => { { id: 'node1', name: 'Dataset Trigger', - type: EVALUATION_DATASET_TRIGGER_NODE, + type: EVALUATION_TRIGGER_NODE_TYPE, typeVersion: 1, position: [0, 0], parameters: {}, @@ -85,11 +85,11 @@ describe('TestRunnerService', () => { }); // Use the protected method via any type casting - const result = (testRunnerService as any).findTriggerNode(workflowWithTrigger); + const result = (testRunnerService as any).findEvaluationTriggerNode(workflowWithTrigger); // Assert the result is the correct node expect(result).toBeDefined(); - expect(result.type).toBe(EVALUATION_DATASET_TRIGGER_NODE); + expect(result.type).toBe(EVALUATION_TRIGGER_NODE_TYPE); expect(result.name).toBe('Dataset Trigger'); }); @@ -118,16 +118,16 @@ describe('TestRunnerService', () => { }); // Call the function and 
expect undefined result - const result = (testRunnerService as any).findTriggerNode(workflowWithoutTrigger); + const result = (testRunnerService as any).findEvaluationTriggerNode(workflowWithoutTrigger); expect(result).toBeUndefined(); }); test('should work with the actual workflow.under-test.json', () => { - const result = (testRunnerService as any).findTriggerNode(wfUnderTestJson); + const result = (testRunnerService as any).findEvaluationTriggerNode(wfUnderTestJson); // Assert the result is the correct node expect(result).toBeDefined(); - expect(result.type).toBe(EVALUATION_DATASET_TRIGGER_NODE); + expect(result.type).toBe(EVALUATION_TRIGGER_NODE_TYPE); expect(result.name).toBe('When fetching a dataset row'); }); }); @@ -140,7 +140,7 @@ describe('TestRunnerService', () => { { id: 'triggerNodeId', name: 'TriggerNode', - type: EVALUATION_DATASET_TRIGGER_NODE, + type: EVALUATION_TRIGGER_NODE_TYPE, typeVersion: 1, position: [0, 0], parameters: {}, @@ -164,6 +164,7 @@ describe('TestRunnerService', () => { data: { main: [mockOutputItems], }, + error: undefined, }, ], }, @@ -185,7 +186,7 @@ describe('TestRunnerService', () => { { id: 'triggerNodeId', name: 'TriggerNode', - type: EVALUATION_DATASET_TRIGGER_NODE, + type: EVALUATION_TRIGGER_NODE_TYPE, typeVersion: 1, position: [0, 0], parameters: {}, @@ -217,6 +218,51 @@ describe('TestRunnerService', () => { } }); + test('should throw an error if evaluation trigger could not fetch data', () => { + // Create workflow with a trigger node + const workflow = mock({ + nodes: [ + { + id: 'triggerNodeId', + name: 'TriggerNode', + type: EVALUATION_TRIGGER_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: {}, + }, + ], + connections: {}, + }); + + // Create execution data with missing output + const execution = mock({ + data: { + resultData: { + runData: { + TriggerNode: [ + { + error: mock(), + }, + ], + }, + }, + }, + }); + + // Expect the method to throw an error + expect(() => { + (testRunnerService as 
any).extractDatasetTriggerOutput(execution, workflow); + }).toThrow(TestRunError); + + // Verify the error has the correct code + try { + (testRunnerService as any).extractDatasetTriggerOutput(execution, workflow); + } catch (error) { + expect(error).toBeInstanceOf(TestRunError); + expect(error.code).toBe('CANT_FETCH_TEST_CASES'); + } + }); + test('should throw an error if trigger node output is empty list', () => { // Create workflow with a trigger node const workflow = mock({ @@ -224,7 +270,7 @@ describe('TestRunnerService', () => { { id: 'triggerNodeId', name: 'TriggerNode', - type: EVALUATION_DATASET_TRIGGER_NODE, + type: EVALUATION_TRIGGER_NODE_TYPE, typeVersion: 1, position: [0, 0], parameters: {}, @@ -243,6 +289,7 @@ describe('TestRunnerService', () => { data: { main: [[]], // Empty list }, + error: undefined, }, ], }, @@ -271,7 +318,7 @@ describe('TestRunnerService', () => { { id: 'triggerNodeId', name: "When clicking 'Execute workflow'", - type: EVALUATION_DATASET_TRIGGER_NODE, + type: EVALUATION_TRIGGER_NODE_TYPE, typeVersion: 1, position: [0, 0], parameters: {}, @@ -297,6 +344,7 @@ describe('TestRunnerService', () => { data: { main: [expectedItems], }, + error: undefined, }, ], }, @@ -374,7 +422,7 @@ describe('TestRunnerService', () => { { id: 'node1', name: triggerNodeName, - type: EVALUATION_DATASET_TRIGGER_NODE, + type: EVALUATION_TRIGGER_NODE_TYPE, typeVersion: 1, position: [0, 0], parameters: {}, @@ -427,7 +475,7 @@ describe('TestRunnerService', () => { { id: 'node1', name: triggerNodeName, - type: EVALUATION_DATASET_TRIGGER_NODE, + type: EVALUATION_TRIGGER_NODE_TYPE, typeVersion: 1, position: [0, 0], parameters: {}, @@ -531,7 +579,7 @@ describe('TestRunnerService', () => { { id: 'node1', name: triggerNodeName, - type: EVALUATION_DATASET_TRIGGER_NODE, + type: EVALUATION_TRIGGER_NODE_TYPE, typeVersion: 1, position: [0, 0], parameters: {}, @@ -586,7 +634,7 @@ describe('TestRunnerService', () => { { id: 'node1', name: triggerNodeName, - type: 
EVALUATION_DATASET_TRIGGER_NODE, + type: EVALUATION_TRIGGER_NODE_TYPE, typeVersion: 1, position: [0, 0], parameters: {}, @@ -632,4 +680,554 @@ describe('TestRunnerService', () => { } }); }); + + describe('validateSetMetricsNodes', () => { + it('should pass when metrics nodes are properly configured', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Set Metrics', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: 'setMetrics', + metrics: { + assignments: [ + { + id: '1', + name: 'accuracy', + value: 0.95, + }, + { + id: '2', + name: 'precision', + value: 0.87, + }, + ], + }, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetMetricsNodes(workflow); + }).not.toThrow(); + }); + + it('should throw SET_METRICS_NODE_NOT_FOUND when no metrics nodes exist', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Regular Node', + type: 'n8n-nodes-base.noOp', + typeVersion: 1, + position: [0, 0], + parameters: {}, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetMetricsNodes(workflow); + }).toThrow(TestRunError); + + try { + (testRunnerService as any).validateSetMetricsNodes(workflow); + } catch (error) { + expect(error).toBeInstanceOf(TestRunError); + expect(error.code).toBe('SET_METRICS_NODE_NOT_FOUND'); + } + }); + + it('should throw SET_METRICS_NODE_NOT_CONFIGURED when metrics node has no parameters', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Set Metrics', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: 'setMetrics', + metrics: undefined, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetMetricsNodes(workflow); + }).toThrow(TestRunError); + + try { + (testRunnerService as any).validateSetMetricsNodes(workflow); + } catch (error) { + 
expect(error).toBeInstanceOf(TestRunError); + expect(error.code).toBe('SET_METRICS_NODE_NOT_CONFIGURED'); + expect(error.extra).toEqual({ node_name: 'Set Metrics' }); + } + }); + + it('should throw SET_METRICS_NODE_NOT_CONFIGURED when metrics node has empty assignments', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Set Metrics', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: 'setMetrics', + metrics: { + assignments: [], + }, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetMetricsNodes(workflow); + }).toThrow(TestRunError); + + try { + (testRunnerService as any).validateSetMetricsNodes(workflow); + } catch (error) { + expect(error).toBeInstanceOf(TestRunError); + expect(error.code).toBe('SET_METRICS_NODE_NOT_CONFIGURED'); + expect(error.extra).toEqual({ node_name: 'Set Metrics' }); + } + }); + + it('should throw SET_METRICS_NODE_NOT_CONFIGURED when assignment has no name', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Set Metrics', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: 'setMetrics', + metrics: { + assignments: [ + { + id: '1', + name: '', + value: 0.95, + }, + ], + }, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetMetricsNodes(workflow); + }).toThrow(TestRunError); + + try { + (testRunnerService as any).validateSetMetricsNodes(workflow); + } catch (error) { + expect(error).toBeInstanceOf(TestRunError); + expect(error.code).toBe('SET_METRICS_NODE_NOT_CONFIGURED'); + expect(error.extra).toEqual({ node_name: 'Set Metrics' }); + } + }); + + it('should throw SET_METRICS_NODE_NOT_CONFIGURED when assignment has null value', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Set Metrics', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: 
'setMetrics', + metrics: { + assignments: [ + { + id: '1', + name: 'accuracy', + value: null, + }, + ], + }, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetMetricsNodes(workflow); + }).toThrow(TestRunError); + + try { + (testRunnerService as any).validateSetMetricsNodes(workflow); + } catch (error) { + expect(error).toBeInstanceOf(TestRunError); + expect(error.code).toBe('SET_METRICS_NODE_NOT_CONFIGURED'); + expect(error.extra).toEqual({ node_name: 'Set Metrics' }); + } + }); + + it('should validate multiple metrics nodes successfully', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Set Metrics 1', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: 'setMetrics', + metrics: { + assignments: [ + { + id: '1', + name: 'accuracy', + value: 0.95, + }, + ], + }, + }, + }, + { + id: 'node2', + name: 'Set Metrics 2', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [100, 0], + parameters: { + operation: 'setMetrics', + metrics: { + assignments: [ + { + id: '2', + name: 'precision', + value: 0.87, + }, + ], + }, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetMetricsNodes(workflow); + }).not.toThrow(); + }); + }); + + describe('validateSetOutputsNodes', () => { + it('should pass when outputs nodes are properly configured', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Set Outputs', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: 'setOutputs', + outputs: { + assignments: [ + { + id: '1', + name: 'result', + value: 'success', + }, + { + id: '2', + name: 'score', + value: 95, + }, + ], + }, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetOutputsNodes(workflow); + }).not.toThrow(); + }); + + it('should pass when operation is default (undefined)', () => { + const workflow 
= mock({ + nodes: [ + { + id: 'node1', + name: 'Set Outputs', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: undefined, + outputs: { + assignments: [ + { + id: '1', + name: 'result', + value: 'success', + }, + { + id: '2', + name: 'score', + value: 95, + }, + ], + }, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetOutputsNodes(workflow); + }).not.toThrow(); + }); + + it('should throw SET_OUTPUTS_NODE_NOT_FOUND when no outputs nodes exist', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Regular Node', + type: 'n8n-nodes-base.noOp', + typeVersion: 1, + position: [0, 0], + parameters: {}, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetOutputsNodes(workflow); + }).toThrow(TestRunError); + + try { + (testRunnerService as any).validateSetOutputsNodes(workflow); + } catch (error) { + expect(error).toBeInstanceOf(TestRunError); + expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_FOUND'); + } + }); + + it('should throw SET_OUTPUTS_NODE_NOT_CONFIGURED when outputs node has no parameters', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Set Outputs', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: 'setOutputs', + outputs: undefined, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetOutputsNodes(workflow); + }).toThrow(TestRunError); + + try { + (testRunnerService as any).validateSetOutputsNodes(workflow); + } catch (error) { + expect(error).toBeInstanceOf(TestRunError); + expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_CONFIGURED'); + expect(error.extra).toEqual({ node_name: 'Set Outputs' }); + } + }); + + it('should throw SET_OUTPUTS_NODE_NOT_CONFIGURED when outputs node has empty assignments', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Set Outputs', + type: 
EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: 'setOutputs', + outputs: { + assignments: [], + }, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetOutputsNodes(workflow); + }).toThrow(TestRunError); + + try { + (testRunnerService as any).validateSetOutputsNodes(workflow); + } catch (error) { + expect(error).toBeInstanceOf(TestRunError); + expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_CONFIGURED'); + expect(error.extra).toEqual({ node_name: 'Set Outputs' }); + } + }); + + it('should throw SET_OUTPUTS_NODE_NOT_CONFIGURED when assignment has no name', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Set Outputs', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: 'setOutputs', + outputs: { + assignments: [ + { + id: '1', + name: '', + value: 'result', + }, + ], + }, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetOutputsNodes(workflow); + }).toThrow(TestRunError); + + try { + (testRunnerService as any).validateSetOutputsNodes(workflow); + } catch (error) { + expect(error).toBeInstanceOf(TestRunError); + expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_CONFIGURED'); + expect(error.extra).toEqual({ node_name: 'Set Outputs' }); + } + }); + + it('should throw SET_OUTPUTS_NODE_NOT_CONFIGURED when assignment has null value', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Set Outputs', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: 'setOutputs', + outputs: { + assignments: [ + { + id: '1', + name: 'result', + value: null, + }, + ], + }, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetOutputsNodes(workflow); + }).toThrow(TestRunError); + + try { + (testRunnerService as any).validateSetOutputsNodes(workflow); + } catch (error) { + 
expect(error).toBeInstanceOf(TestRunError); + expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_CONFIGURED'); + expect(error.extra).toEqual({ node_name: 'Set Outputs' }); + } + }); + + it('should validate multiple outputs nodes successfully', () => { + const workflow = mock({ + nodes: [ + { + id: 'node1', + name: 'Set Outputs 1', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [0, 0], + parameters: { + operation: 'setOutputs', + outputs: { + assignments: [ + { + id: '1', + name: 'result', + value: 'success', + }, + ], + }, + }, + }, + { + id: 'node2', + name: 'Set Outputs 2', + type: EVALUATION_NODE_TYPE, + typeVersion: 1, + position: [100, 0], + parameters: { + operation: 'setOutputs', + outputs: { + assignments: [ + { + id: '2', + name: 'score', + value: 95, + }, + ], + }, + }, + }, + ], + connections: {}, + }); + + expect(() => { + (testRunnerService as any).validateSetOutputsNodes(workflow); + }).not.toThrow(); + }); + }); }); diff --git a/packages/cli/src/evaluation.ee/test-runner/test-runner.service.ee.ts b/packages/cli/src/evaluation.ee/test-runner/test-runner.service.ee.ts index 0026fa1297..2d7cd6b58f 100644 --- a/packages/cli/src/evaluation.ee/test-runner/test-runner.service.ee.ts +++ b/packages/cli/src/evaluation.ee/test-runner/test-runner.service.ee.ts @@ -2,7 +2,11 @@ import type { User, TestRun } from '@n8n/db'; import { TestCaseExecutionRepository, TestRunRepository, WorkflowRepository } from '@n8n/db'; import { Service } from '@n8n/di'; import { ErrorReporter, Logger } from 'n8n-core'; -import { ExecutionCancelledError } from 'n8n-workflow'; +import { + EVALUATION_NODE_TYPE, + EVALUATION_TRIGGER_NODE_TYPE, + ExecutionCancelledError, +} from 'n8n-workflow'; import type { IDataObject, IRun, @@ -10,13 +14,14 @@ import type { IWorkflowExecutionDataProcess, IExecuteData, INodeExecutionData, + AssignmentCollectionValue, } from 'n8n-workflow'; import assert from 'node:assert'; import { ActiveExecutions } from '@/active-executions'; import config 
from '@/config'; -import { EVALUATION_DATASET_TRIGGER_NODE, EVALUATION_NODE } from '@/constants'; import { TestCaseExecutionError, TestRunError } from '@/evaluation.ee/test-runner/errors.ee'; +import { checkNodeParameterNotEmpty } from '@/evaluation.ee/test-runner/utils.ee'; import { Telemetry } from '@/telemetry'; import { WorkflowRunner } from '@/workflow-runner'; @@ -59,8 +64,97 @@ export class TestRunnerService { /** * Finds the dataset trigger node in the workflow */ - private findTriggerNode(workflow: IWorkflowBase) { - return workflow.nodes.find((node) => node.type === EVALUATION_DATASET_TRIGGER_NODE); + private findEvaluationTriggerNode(workflow: IWorkflowBase) { + return workflow.nodes.find((node) => node.type === EVALUATION_TRIGGER_NODE_TYPE); + } + + /** + * Validates the evaluation trigger node is present in the workflow + * and is configured correctly. + */ + private validateEvaluationTriggerNode(workflow: IWorkflowBase) { + const triggerNode = this.findEvaluationTriggerNode(workflow); + if (!triggerNode) { + throw new TestRunError('EVALUATION_TRIGGER_NOT_FOUND'); + } + + if ( + !triggerNode.credentials || + !checkNodeParameterNotEmpty(triggerNode.parameters?.documentId) || + !checkNodeParameterNotEmpty(triggerNode.parameters?.sheetName) + ) { + throw new TestRunError('EVALUATION_TRIGGER_NOT_CONFIGURED', { node_name: triggerNode.name }); + } + + if (triggerNode?.disabled) { + throw new TestRunError('EVALUATION_TRIGGER_DISABLED'); + } + } + + /** + * Checks if the Evaluation Set Metrics nodes are present in the workflow + * and are configured correctly. 
+ */ + private validateSetMetricsNodes(workflow: IWorkflowBase) { + const metricsNodes = TestRunnerService.getEvaluationMetricsNodes(workflow); + if (metricsNodes.length === 0) { + throw new TestRunError('SET_METRICS_NODE_NOT_FOUND'); + } + + const unconfiguredMetricsNode = metricsNodes.find( + (node) => + !node.parameters || + !node.parameters.metrics || + (node.parameters.metrics as AssignmentCollectionValue).assignments?.length === 0 || + (node.parameters.metrics as AssignmentCollectionValue).assignments?.some( + (assignment) => !assignment.name || assignment.value === null, + ), + ); + + if (unconfiguredMetricsNode) { + throw new TestRunError('SET_METRICS_NODE_NOT_CONFIGURED', { + node_name: unconfiguredMetricsNode.name, + }); + } + } + + /** + * Checks if the Evaluation Set Outputs nodes are present in the workflow + * and are configured correctly. + */ + private validateSetOutputsNodes(workflow: IWorkflowBase) { + const setOutputsNodes = TestRunnerService.getEvaluationSetOutputsNodes(workflow); + if (setOutputsNodes.length === 0) { + throw new TestRunError('SET_OUTPUTS_NODE_NOT_FOUND'); + } + + const unconfiguredSetOutputsNode = setOutputsNodes.find( + (node) => + !node.parameters || + !node.parameters.outputs || + (node.parameters.outputs as AssignmentCollectionValue).assignments?.length === 0 || + (node.parameters.outputs as AssignmentCollectionValue).assignments?.some( + (assignment) => !assignment.name || assignment.value === null, + ), + ); + + if (unconfiguredSetOutputsNode) { + throw new TestRunError('SET_OUTPUTS_NODE_NOT_CONFIGURED', { + node_name: unconfiguredSetOutputsNode.name, + }); + } + } + + /** + * Validates workflow configuration for evaluation + * Throws appropriate TestRunError if validation fails + */ + private validateWorkflowConfiguration(workflow: IWorkflowBase): void { + this.validateEvaluationTriggerNode(workflow); + + this.validateSetOutputsNodes(workflow); + + this.validateSetMetricsNodes(workflow); } /** @@ -83,7 +177,7 @@ export 
class TestRunnerService { // Evaluation executions should run the same way as manual, // because they need pinned data and partial execution logic - const triggerNode = this.findTriggerNode(workflow); + const triggerNode = this.findEvaluationTriggerNode(workflow); assert(triggerNode); const pinData = { @@ -148,7 +242,7 @@ export class TestRunnerService { // Evaluation executions should run the same way as manual, // because they need pinned data and partial execution logic - const triggerNode = this.findTriggerNode(workflow); + const triggerNode = this.findEvaluationTriggerNode(workflow); if (!triggerNode) { throw new TestRunError('EVALUATION_TRIGGER_NOT_FOUND'); @@ -219,11 +313,22 @@ export class TestRunnerService { } /** - * Get the evaluation metrics nodes from a workflow. + * Get the evaluation set metrics nodes from a workflow. */ static getEvaluationMetricsNodes(workflow: IWorkflowBase) { return workflow.nodes.filter( - (node) => node.type === EVALUATION_NODE && node.parameters.operation === 'setMetrics', + (node) => node.type === EVALUATION_NODE_TYPE && node.parameters.operation === 'setMetrics', + ); + } + + /** + * Get the evaluation set outputs nodes from a workflow. 
+ */ + static getEvaluationSetOutputsNodes(workflow: IWorkflowBase) { + return workflow.nodes.filter( + (node) => + node.type === EVALUATION_NODE_TYPE && + (node.parameters.operation === 'setOutputs' || node.parameters.operation === undefined), ); } @@ -231,10 +336,17 @@ export class TestRunnerService { * Extract the dataset trigger output */ private extractDatasetTriggerOutput(execution: IRun, workflow: IWorkflowBase) { - const triggerNode = this.findTriggerNode(workflow); + const triggerNode = this.findEvaluationTriggerNode(workflow); assert(triggerNode); const triggerOutputData = execution.data.resultData.runData[triggerNode.name][0]; + + if (triggerOutputData?.error) { + throw new TestRunError('CANT_FETCH_TEST_CASES', { + message: triggerOutputData.error.message, + }); + } + const triggerOutput = triggerOutputData?.data?.main?.[0]; if (!triggerOutput || triggerOutput.length === 0) { @@ -248,16 +360,16 @@ export class TestRunnerService { * Evaluation result is collected from all Evaluation Metrics nodes */ private extractEvaluationResult(execution: IRun, workflow: IWorkflowBase): IDataObject { - // TODO: Do not fail if not all metric nodes were executed const metricsNodes = TestRunnerService.getEvaluationMetricsNodes(workflow); - const metricsRunData = metricsNodes.flatMap( - (node) => execution.data.resultData.runData[node.name], - ); + + // If a metrics node did not execute, ignore it. + const metricsRunData = metricsNodes + .flatMap((node) => execution.data.resultData.runData[node.name]) + .filter((data) => data !== undefined); const metricsData = metricsRunData .reverse() .map((data) => data.data?.main?.[0]?.[0]?.json ?? 
{}); const metricsResult = metricsData.reduce((acc, curr) => ({ ...acc, ...curr }), {}); - return metricsResult; } @@ -294,6 +406,9 @@ export class TestRunnerService { // Update test run status await this.testRunRepository.markAsRunning(testRun.id); + // Check if the workflow is ready for evaluation + this.validateWorkflowConfiguration(workflow); + this.telemetry.track('User ran test', { user_id: user.id, run_id: testRun.id, @@ -377,19 +492,31 @@ export class TestRunnerService { this.extractEvaluationResult(testCaseExecution, workflow), ); - this.logger.debug('Test case metrics extracted', addedMetrics); - - // Create a new test case execution in DB - await this.testCaseExecutionRepository.createTestCaseExecution({ - executionId: testCaseExecutionId, - testRun: { - id: testRun.id, - }, - runAt, - completedAt, - status: 'success', - metrics: addedMetrics, - }); + if (Object.keys(addedMetrics).length === 0) { + await this.testCaseExecutionRepository.createTestCaseExecution({ + executionId: testCaseExecutionId, + testRun: { + id: testRun.id, + }, + runAt, + completedAt, + status: 'error', + errorCode: 'NO_METRICS_COLLECTED', + }); + } else { + this.logger.debug('Test case metrics extracted', addedMetrics); + // Create a new test case execution in DB + await this.testCaseExecutionRepository.createTestCaseExecution({ + executionId: testCaseExecutionId, + testRun: { + id: testRun.id, + }, + runAt, + completedAt, + status: 'success', + metrics: addedMetrics, + }); + } } catch (e) { const completedAt = new Date(); // FIXME: this is a temporary log @@ -500,7 +627,7 @@ export class TestRunnerService { } else { const { manager: dbManager } = this.testRunRepository; - // If there is no abort controller - just mark the test run and all its' pending test case executions as cancelled + // If there is no abort controller - just mark the test run and all its pending test case executions as cancelled await dbManager.transaction(async (trx) => { await 
this.testRunRepository.markAsCancelled(testRunId, trx); await this.testCaseExecutionRepository.markAllPendingAsCancelled(testRunId, trx); diff --git a/packages/cli/src/evaluation.ee/test-runner/utils.ee.ts b/packages/cli/src/evaluation.ee/test-runner/utils.ee.ts new file mode 100644 index 0000000000..36f30762a4 --- /dev/null +++ b/packages/cli/src/evaluation.ee/test-runner/utils.ee.ts @@ -0,0 +1,19 @@ +import type { NodeParameterValueType, INodeParameterResourceLocator } from 'n8n-workflow'; + +function isRlcValue(value: NodeParameterValueType): value is INodeParameterResourceLocator { + return Boolean( + typeof value === 'object' && value && 'value' in value && '__rl' in value && value.__rl, + ); +} + +export function checkNodeParameterNotEmpty(value: NodeParameterValueType) { + if (value === undefined || value === null || value === '') { + return false; + } + + if (isRlcValue(value)) { + return checkNodeParameterNotEmpty(value.value); + } + + return true; +} diff --git a/packages/cli/src/interfaces.ts b/packages/cli/src/interfaces.ts index 1879950f4e..22c4471aec 100644 --- a/packages/cli/src/interfaces.ts +++ b/packages/cli/src/interfaces.ts @@ -200,6 +200,10 @@ export interface ILicenseReadResponse { value: number; warningThreshold: number; }; + workflowsHavingEvaluations: { + limit: number; + value: number; + }; }; license: { planId: string; diff --git a/packages/cli/src/license/__tests__/license.service.test.ts b/packages/cli/src/license/__tests__/license.service.test.ts index df1ae76300..895861ce86 100644 --- a/packages/cli/src/license/__tests__/license.service.test.ts +++ b/packages/cli/src/license/__tests__/license.service.test.ts @@ -1,3 +1,4 @@ +import type { LicenseState } from '@n8n/backend-common'; import type { WorkflowRepository } from '@n8n/db'; import type { TEntitlement } from '@n8n_io/license-sdk'; import axios, { AxiosError } from 'axios'; @@ -12,12 +13,14 @@ jest.mock('axios'); describe('LicenseService', () => { const license = mock(); + const 
licenseState = mock(); const workflowRepository = mock(); const entitlement = mock({ productId: '123' }); const eventService = mock(); const licenseService = new LicenseService( mock(), license, + licenseState, workflowRepository, mock(), eventService, @@ -26,7 +29,9 @@ describe('LicenseService', () => { license.getMainPlan.mockReturnValue(entitlement); license.getTriggerLimit.mockReturnValue(400); license.getPlanName.mockReturnValue('Test Plan'); + licenseState.getMaxWorkflowsWithEvaluations.mockReturnValue(2); workflowRepository.getActiveTriggerCount.mockResolvedValue(7); + workflowRepository.getWorkflowsWithEvaluationCount.mockResolvedValue(1); beforeEach(() => jest.clearAllMocks()); @@ -46,6 +51,10 @@ describe('LicenseService', () => { value: 7, warningThreshold: 0.8, }, + workflowsHavingEvaluations: { + limit: 2, + value: 1, + }, }, license: { planId: '123', diff --git a/packages/cli/src/license/license.service.ts b/packages/cli/src/license/license.service.ts index 40ae9dd55a..80526f8260 100644 --- a/packages/cli/src/license/license.service.ts +++ b/packages/cli/src/license/license.service.ts @@ -1,3 +1,4 @@ +import { LicenseState } from '@n8n/backend-common'; import type { User } from '@n8n/db'; import { WorkflowRepository } from '@n8n/db'; import { Service } from '@n8n/di'; @@ -26,6 +27,7 @@ export class LicenseService { constructor( private readonly logger: Logger, private readonly license: License, + private readonly licenseState: LicenseState, private readonly workflowRepository: WorkflowRepository, private readonly urlService: UrlService, private readonly eventService: EventService, @@ -33,6 +35,8 @@ export class LicenseService { async getLicenseData() { const triggerCount = await this.workflowRepository.getActiveTriggerCount(); + const workflowsWithEvaluationsCount = + await this.workflowRepository.getWorkflowsWithEvaluationCount(); const mainPlan = this.license.getMainPlan(); return { @@ -42,6 +46,10 @@ export class LicenseService { limit: 
this.license.getTriggerLimit(), warningThreshold: 0.8, }, + workflowsHavingEvaluations: { + value: workflowsWithEvaluationsCount, + limit: this.licenseState.getMaxWorkflowsWithEvaluations(), + }, }, license: { planId: mainPlan?.productId ?? '', diff --git a/packages/cli/src/metrics/__tests__/license-metrics.service.test.ts b/packages/cli/src/metrics/__tests__/license-metrics.service.test.ts index 1c5e54423f..17db323a77 100644 --- a/packages/cli/src/metrics/__tests__/license-metrics.service.test.ts +++ b/packages/cli/src/metrics/__tests__/license-metrics.service.test.ts @@ -37,7 +37,11 @@ describe('LicenseMetricsService', () => { describe('collectUsageMetrics', () => { test('should return an array of expected usage metrics', async () => { const mockActiveTriggerCount = 1234; + const mockWorkflowsWithEvaluationsCount = 5; workflowRepository.getActiveTriggerCount.mockResolvedValue(mockActiveTriggerCount); + workflowRepository.getWorkflowsWithEvaluationCount.mockResolvedValue( + mockWorkflowsWithEvaluationsCount, + ); const mockRenewalMetrics = { activeWorkflows: 100, @@ -48,6 +52,7 @@ describe('LicenseMetricsService', () => { productionExecutions: 600, productionRootExecutions: 550, manualExecutions: 700, + evaluations: 5, }; licenseMetricsRespository.getLicenseRenewalMetrics.mockResolvedValue(mockRenewalMetrics); @@ -67,6 +72,7 @@ describe('LicenseMetricsService', () => { }, { name: 'manualExecutions', value: mockRenewalMetrics.manualExecutions }, { name: 'activeWorkflowTriggers', value: mockActiveTriggerCount }, + { name: 'evaluations', value: mockRenewalMetrics.evaluations }, ]); }); }); diff --git a/packages/cli/src/metrics/license-metrics.service.ts b/packages/cli/src/metrics/license-metrics.service.ts index d60cb7638d..f8ff290abf 100644 --- a/packages/cli/src/metrics/license-metrics.service.ts +++ b/packages/cli/src/metrics/license-metrics.service.ts @@ -20,7 +20,10 @@ export class LicenseMetricsService { manualExecutions, } = await 
this.licenseMetricsRepository.getLicenseRenewalMetrics(); - const activeTriggerCount = await this.workflowRepository.getActiveTriggerCount(); + const [activeTriggerCount, workflowsWithEvaluationsCount] = await Promise.all([ + this.workflowRepository.getActiveTriggerCount(), + this.workflowRepository.getWorkflowsWithEvaluationCount(), + ]); return [ { name: 'activeWorkflows', value: activeWorkflows }, @@ -32,6 +35,7 @@ export class LicenseMetricsService { { name: 'productionRootExecutions', value: productionRootExecutions }, { name: 'manualExecutions', value: manualExecutions }, { name: 'activeWorkflowTriggers', value: activeTriggerCount }, + { name: 'evaluations', value: workflowsWithEvaluationsCount }, ]; } diff --git a/packages/cli/src/services/frontend.service.ts b/packages/cli/src/services/frontend.service.ts index 6dc2dc2133..6e58787727 100644 --- a/packages/cli/src/services/frontend.service.ts +++ b/packages/cli/src/services/frontend.service.ts @@ -256,6 +256,9 @@ export class FrontendService { logsView: { enabled: false, }, + evaluation: { + quota: this.licenseState.getMaxWorkflowsWithEvaluations(), + }, }; } @@ -395,6 +398,9 @@ export class FrontendService { this.settings.logsView.enabled = config.get('logs_view.enabled'); + // Refresh evaluation settings + this.settings.evaluation.quota = this.licenseState.getMaxWorkflowsWithEvaluations(); + return this.settings; } diff --git a/packages/cli/test/integration/database/repositories/workflow.repository.test.ts b/packages/cli/test/integration/database/repositories/workflow.repository.test.ts index a026a3a029..1908c65f4d 100644 --- a/packages/cli/test/integration/database/repositories/workflow.repository.test.ts +++ b/packages/cli/test/integration/database/repositories/workflow.repository.test.ts @@ -1,6 +1,7 @@ import { WorkflowRepository } from '@n8n/db'; import { Container } from '@n8n/di'; +import { createTestRun } from '../../shared/db/evaluation'; import { createWorkflowWithTrigger, createWorkflow, @@ 
-115,4 +116,73 @@ describe('WorkflowRepository', () => { expect(activeIds).toHaveLength(1); }); }); + + describe('getWorkflowsWithEvaluationCount', () => { + it('should return 0 when no workflows have test runs', async () => { + // + // ARRANGE + // + const workflowRepository = Container.get(WorkflowRepository); + await createWorkflow(); + await createWorkflow(); + + // + // ACT + // + const count = await workflowRepository.getWorkflowsWithEvaluationCount(); + + // + // ASSERT + // + expect(count).toBe(0); + }); + + it('should return correct count when some workflows have test runs', async () => { + // + // ARRANGE + // + const workflowRepository = Container.get(WorkflowRepository); + const workflow1 = await createWorkflow(); + await createWorkflow(); + const workflow3 = await createWorkflow(); + + await createTestRun(workflow1.id); + await createTestRun(workflow3.id); + + // + // ACT + // + const count = await workflowRepository.getWorkflowsWithEvaluationCount(); + + // + // ASSERT + // + expect(count).toBe(2); + }); + + it('should count each workflow only once even with multiple test runs', async () => { + // + // ARRANGE + // + const workflowRepository = Container.get(WorkflowRepository); + const workflow1 = await createWorkflow(); + const workflow2 = await createWorkflow(); + + await createTestRun(workflow1.id); + await createTestRun(workflow1.id); + await createTestRun(workflow1.id); + await createTestRun(workflow2.id); + await createTestRun(workflow2.id); + + // + // ACT + // + const count = await workflowRepository.getWorkflowsWithEvaluationCount(); + + // + // ASSERT + // + expect(count).toBe(2); + }); + }); }); diff --git a/packages/cli/test/integration/license-metrics.repository.test.ts b/packages/cli/test/integration/license-metrics.repository.test.ts index 0b4e3f7670..27173b83f0 100644 --- a/packages/cli/test/integration/license-metrics.repository.test.ts +++ b/packages/cli/test/integration/license-metrics.repository.test.ts @@ -83,6 +83,7 @@ 
describe('LicenseMetricsRepository', () => { productionExecutions: 3, productionRootExecutions: 3, manualExecutions: 2, + evaluations: 0, }); }); @@ -100,6 +101,7 @@ describe('LicenseMetricsRepository', () => { productionExecutions: 0, // not NaN productionRootExecutions: 0, // not NaN manualExecutions: 0, // not NaN + evaluations: 0, }); }); }); diff --git a/packages/cli/test/integration/license.api.test.ts b/packages/cli/test/integration/license.api.test.ts index fda70f74f8..a7ccf36b22 100644 --- a/packages/cli/test/integration/license.api.test.ts +++ b/packages/cli/test/integration/license.api.test.ts @@ -119,6 +119,10 @@ const DEFAULT_LICENSE_RESPONSE: { data: ILicenseReadResponse } = { limit: -1, warningThreshold: 0.8, }, + workflowsHavingEvaluations: { + value: 0, + limit: 0, + }, }, license: { planId: '', @@ -135,6 +139,10 @@ const DEFAULT_POST_RESPONSE: { data: ILicensePostResponse } = { limit: -1, warningThreshold: 0.8, }, + workflowsHavingEvaluations: { + value: 0, + limit: 0, + }, }, license: { planId: '', diff --git a/packages/frontend/@n8n/stores/src/constants.ts b/packages/frontend/@n8n/stores/src/constants.ts index 1de5b0ff8d..46b628956b 100644 --- a/packages/frontend/@n8n/stores/src/constants.ts +++ b/packages/frontend/@n8n/stores/src/constants.ts @@ -27,7 +27,7 @@ export const STORES = { BECOME_TEMPLATE_CREATOR: 'becomeTemplateCreator', PROJECTS: 'projects', API_KEYS: 'apiKeys', - TEST_DEFINITION: 'testDefinition', + EVALUATION: 'evaluation', FOLDERS: 'folders', MODULES: 'modules', } as const; diff --git a/packages/frontend/@n8n/stores/src/useAgentRequestStore.test.ts b/packages/frontend/@n8n/stores/src/useAgentRequestStore.test.ts index d47abefbf4..d901cdbd78 100644 --- a/packages/frontend/@n8n/stores/src/useAgentRequestStore.test.ts +++ b/packages/frontend/@n8n/stores/src/useAgentRequestStore.test.ts @@ -1,8 +1,9 @@ import { setActivePinia, createPinia } from 'pinia'; -import { useAgentRequestStore } from './useAgentRequestStore'; import { 
beforeEach, describe, expect, it, vi } from 'vitest'; import { nextTick } from 'vue'; +import { useAgentRequestStore } from './useAgentRequestStore'; + // Mock localStorage const localStorageMock = { getItem: vi.fn(), @@ -104,7 +105,7 @@ describe('parameterOverrides.store', () => { store.addAgentRequest('workflow-1', 'node-1', 'param1', 'value1'); - expect(store.agentRequests['workflow-1']['node-1']['param1']).toBe('value1'); + expect(store.agentRequests['workflow-1']['node-1'].param1).toBe('value1'); }); it('adds multiple parameter overrides', () => { diff --git a/packages/frontend/editor-ui/src/Interface.ts b/packages/frontend/editor-ui/src/Interface.ts index a5a273c23e..680e6a9eef 100644 --- a/packages/frontend/editor-ui/src/Interface.ts +++ b/packages/frontend/editor-ui/src/Interface.ts @@ -55,6 +55,7 @@ import type { AI_OTHERS_NODE_CREATOR_VIEW, ROLE, AI_UNCATEGORIZED_CATEGORY, + AI_EVALUATION, } from '@/constants'; import type { BulkCommand, Undoable } from '@/models/history'; @@ -1079,7 +1080,8 @@ export type NodeFilterType = | typeof TRIGGER_NODE_CREATOR_VIEW | typeof AI_NODE_CREATOR_VIEW | typeof AI_OTHERS_NODE_CREATOR_VIEW - | typeof AI_UNCATEGORIZED_CATEGORY; + | typeof AI_UNCATEGORIZED_CATEGORY + | typeof AI_EVALUATION; export type NodeCreatorOpenSource = | '' @@ -1092,7 +1094,9 @@ export type NodeCreatorOpenSource = | 'node_connection_action' | 'node_connection_drop' | 'notice_error_message' - | 'add_node_button'; + | 'add_node_button' + | 'add_evaluation_trigger_button' + | 'add_evaluation_node_button'; export interface INodeCreatorState { itemsFilter: string; @@ -1318,6 +1322,10 @@ export type UsageState = { value: number; warningThreshold: number; // hardcoded value in BE }; + workflowsHavingEvaluations: { + limit: number; // -1 for unlimited, from license + value: number; + }; }; license: { planId: string; // community @@ -1466,7 +1474,8 @@ export type CloudUpdateLinkSourceType = | 'external-secrets' | 'rbac' | 'debug' - | 'insights'; + | 
'insights' + | 'evaluations'; export type UTMCampaign = | 'upgrade-custom-data-filter' @@ -1490,7 +1499,8 @@ export type UTMCampaign = | 'upgrade-external-secrets' | 'upgrade-rbac' | 'upgrade-debug' - | 'upgrade-insights'; + | 'upgrade-insights' + | 'upgrade-evaluations'; export type N8nBanners = { [key in BannerName]: { diff --git a/packages/frontend/editor-ui/src/__tests__/defaults.ts b/packages/frontend/editor-ui/src/__tests__/defaults.ts index 707790f5eb..41dd1d6b97 100644 --- a/packages/frontend/editor-ui/src/__tests__/defaults.ts +++ b/packages/frontend/editor-ui/src/__tests__/defaults.ts @@ -160,4 +160,7 @@ export const defaultSettings: FrontendSettings = { logsView: { enabled: false, }, + evaluation: { + quota: 0, + }, }; diff --git a/packages/frontend/editor-ui/src/api/evaluation.ee.ts b/packages/frontend/editor-ui/src/api/evaluation.ee.ts new file mode 100644 index 0000000000..f38a0c3321 --- /dev/null +++ b/packages/frontend/editor-ui/src/api/evaluation.ee.ts @@ -0,0 +1,108 @@ +import type { IRestApiContext } from '@/Interface'; +import { makeRestApiRequest, request } from '@/utils/apiUtils'; + +export interface TestRunRecord { + id: string; + workflowId: string; + status: 'new' | 'running' | 'completed' | 'error' | 'cancelled' | 'warning' | 'success'; + metrics?: Record; + createdAt: string; + updatedAt: string; + runAt: string; + completedAt: string; + errorCode?: string; + errorDetails?: Record; + finalResult?: 'success' | 'error' | 'warning'; +} + +interface GetTestRunParams { + workflowId: string; + runId: string; +} + +interface DeleteTestRunParams { + workflowId: string; + runId: string; +} + +export interface TestCaseExecutionRecord { + id: string; + testRunId: string; + executionId: string; + status: 'running' | 'completed' | 'error'; + createdAt: string; + updatedAt: string; + runAt: string; + metrics?: Record; + errorCode?: string; + errorDetails?: Record; +} + +const getTestRunsEndpoint = (workflowId: string, runId?: string) => + 
`/workflows/${workflowId}/test-runs${runId ? `/${runId}` : ''}`; + +// Get all test runs for a workflow +export const getTestRuns = async (context: IRestApiContext, workflowId: string) => { + return await makeRestApiRequest(context, 'GET', getTestRunsEndpoint(workflowId)); +}; + +// Get specific test run +export const getTestRun = async (context: IRestApiContext, params: GetTestRunParams) => { + return await makeRestApiRequest( + context, + 'GET', + getTestRunsEndpoint(params.workflowId, params.runId), + ); +}; + +// Start a new test run +export const startTestRun = async (context: IRestApiContext, workflowId: string) => { + const response = await request({ + method: 'POST', + baseURL: context.baseUrl, + endpoint: `/workflows/${workflowId}/test-runs/new`, + headers: { 'push-ref': context.pushRef }, + }); + // CLI is returning the response without wrapping it in `data` key + return response as { success: boolean }; +}; + +export const cancelTestRun = async ( + context: IRestApiContext, + workflowId: string, + testRunId: string, +) => { + const response = await request({ + method: 'POST', + baseURL: context.baseUrl, + endpoint: `/workflows/${workflowId}/test-runs/${testRunId}/cancel`, + headers: { 'push-ref': context.pushRef }, + }); + // CLI is returning the response without wrapping it in `data` key + return response as { success: boolean }; +}; + +// Delete a test run +export const deleteTestRun = async (context: IRestApiContext, params: DeleteTestRunParams) => { + return await makeRestApiRequest<{ success: boolean }>( + context, + 'DELETE', + getTestRunsEndpoint(params.workflowId, params.runId), + ); +}; + +const getRunExecutionsEndpoint = (workflowId: string, runId: string) => + `/workflows/${workflowId}/test-runs/${runId}/test-cases`; + +// Get all test cases of a test run +export const getTestCaseExecutions = async ( + context: IRestApiContext, + workflowId: string, + runId: string, +) => { + return await makeRestApiRequest( + context, + 'GET',
getRunExecutionsEndpoint(workflowId, runId), + ); +}; diff --git a/packages/frontend/editor-ui/src/api/testDefinition.ee.ts b/packages/frontend/editor-ui/src/api/testDefinition.ee.ts deleted file mode 100644 index 14278c77c4..0000000000 --- a/packages/frontend/editor-ui/src/api/testDefinition.ee.ts +++ /dev/null @@ -1,210 +0,0 @@ -import type { IRestApiContext } from '@/Interface'; -import { makeRestApiRequest, request } from '@/utils/apiUtils'; - -export interface TestDefinitionRecord { - id: string; - name: string; - workflowId: string; - evaluationWorkflowId?: string | null; - annotationTagId?: string | null; - description?: string | null; - updatedAt?: string; - createdAt: string; - annotationTag?: string | null; - mockedNodes?: Array<{ name: string; id: string }>; -} - -interface CreateTestDefinitionParams { - name: string; - workflowId: string; - evaluationWorkflowId?: string | null; -} - -export interface UpdateTestDefinitionParams { - name?: string; - evaluationWorkflowId?: string | null; - annotationTagId?: string | null; - description?: string | null; - mockedNodes?: Array<{ name: string; id: string }>; -} - -export interface UpdateTestResponse { - createdAt: string; - updatedAt: string; - id: string; - name: string; - workflowId: string; - description?: string | null; - annotationTag?: string | null; - evaluationWorkflowId?: string | null; - annotationTagId?: string | null; -} - -export interface TestRunRecord { - id: string; - testDefinitionId: string; - status: 'new' | 'running' | 'completed' | 'error' | 'cancelled' | 'warning' | 'success'; - metrics?: Record; - createdAt: string; - updatedAt: string; - runAt: string; - completedAt: string; - errorCode?: string; - errorDetails?: Record; - finalResult?: 'success' | 'error' | 'warning'; -} - -interface GetTestRunParams { - testDefinitionId: string; - runId: string; -} - -interface DeleteTestRunParams { - testDefinitionId: string; - runId: string; -} - -export interface TestCaseExecutionRecord { - id: 
string; - testRunId: string; - executionId: string; - pastExecutionId: string; - evaluationExecutionId: string; - status: 'running' | 'completed' | 'error'; - createdAt: string; - updatedAt: string; - runAt: string; - metrics?: Record; - errorCode?: string; - errorDetails?: Record; -} - -const endpoint = '/evaluation/test-definitions'; - -export async function getTestDefinitions( - context: IRestApiContext, - params?: { workflowId?: string }, -) { - let url = endpoint; - if (params?.workflowId) { - url += `?filter=${JSON.stringify({ workflowId: params.workflowId })}`; - } - return await makeRestApiRequest<{ count: number; testDefinitions: TestDefinitionRecord[] }>( - context, - 'GET', - url, - ); -} - -export async function getTestDefinition(context: IRestApiContext, id: string) { - return await makeRestApiRequest(context, 'GET', `${endpoint}/${id}`); -} - -export async function createTestDefinition( - context: IRestApiContext, - params: CreateTestDefinitionParams, -) { - return await makeRestApiRequest(context, 'POST', endpoint, params); -} - -export async function updateTestDefinition( - context: IRestApiContext, - id: string, - params: UpdateTestDefinitionParams, -) { - return await makeRestApiRequest( - context, - 'PATCH', - `${endpoint}/${id}`, - params, - ); -} - -export async function deleteTestDefinition(context: IRestApiContext, id: string) { - return await makeRestApiRequest<{ success: boolean }>(context, 'DELETE', `${endpoint}/${id}`); -} - -export async function getExampleEvaluationInput( - context: IRestApiContext, - testDefinitionId: string, - annotationTagId: string, -) { - return await makeRestApiRequest | null>( - context, - 'GET', - `${endpoint}/${testDefinitionId}/example-evaluation-input?annotationTagId=${annotationTagId}`, - ); -} - -const getRunsEndpoint = (testDefinitionId: string, runId?: string) => - `${endpoint}/${testDefinitionId}/runs${runId ? 
`/${runId}` : ''}`; - -// Get all test runs for a test definition -export const getTestRuns = async (context: IRestApiContext, testDefinitionId: string) => { - return await makeRestApiRequest( - context, - 'GET', - getRunsEndpoint(testDefinitionId), - ); -}; - -// Get specific test run -export const getTestRun = async (context: IRestApiContext, params: GetTestRunParams) => { - return await makeRestApiRequest( - context, - 'GET', - getRunsEndpoint(params.testDefinitionId, params.runId), - ); -}; - -// Start a new test run -export const startTestRun = async (context: IRestApiContext, testDefinitionId: string) => { - const response = await request({ - method: 'POST', - baseURL: context.baseUrl, - endpoint: `${endpoint}/${testDefinitionId}/run`, - headers: { 'push-ref': context.pushRef }, - }); - // CLI is returning the response without wrapping it in `data` key - return response as { success: boolean }; -}; - -export const cancelTestRun = async ( - context: IRestApiContext, - testDefinitionId: string, - testRunId: string, -) => { - const response = await request({ - method: 'POST', - baseURL: context.baseUrl, - endpoint: `${endpoint}/${testDefinitionId}/runs/${testRunId}/cancel`, - headers: { 'push-ref': context.pushRef }, - }); - // CLI is returning the response without wrapping it in `data` key - return response as { success: boolean }; -}; - -// Delete a test run -export const deleteTestRun = async (context: IRestApiContext, params: DeleteTestRunParams) => { - return await makeRestApiRequest<{ success: boolean }>( - context, - 'DELETE', - getRunsEndpoint(params.testDefinitionId, params.runId), - ); -}; - -const getRunExecutionsEndpoint = (testDefinitionId: string, runId: string) => - `${endpoint}/${testDefinitionId}/runs/${runId}/cases`; - -// Get all test cases of a test run -export const getTestCaseExecutions = async ( - context: IRestApiContext, - testDefinitionId: string, - runId: string, -) => { - return await makeRestApiRequest( - context, - 'GET', - 
getRunExecutionsEndpoint(testDefinitionId, runId), - ); -}; diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/ListRuns/MetricsChart.vue b/packages/frontend/editor-ui/src/components/Evaluations.ee/ListRuns/MetricsChart.vue similarity index 97% rename from packages/frontend/editor-ui/src/components/TestDefinition/ListRuns/MetricsChart.vue rename to packages/frontend/editor-ui/src/components/Evaluations.ee/ListRuns/MetricsChart.vue index 7d8f5a0025..5ac2f62ed4 100644 --- a/packages/frontend/editor-ui/src/components/TestDefinition/ListRuns/MetricsChart.vue +++ b/packages/frontend/editor-ui/src/components/Evaluations.ee/ListRuns/MetricsChart.vue @@ -1,5 +1,5 @@ diff --git a/packages/frontend/editor-ui/src/components/Evaluations.ee/ListRuns/TestRunsTable.vue b/packages/frontend/editor-ui/src/components/Evaluations.ee/ListRuns/TestRunsTable.vue new file mode 100644 index 0000000000..22ead21cdc --- /dev/null +++ b/packages/frontend/editor-ui/src/components/Evaluations.ee/ListRuns/TestRunsTable.vue @@ -0,0 +1,177 @@ + + + + + diff --git a/packages/frontend/editor-ui/src/components/Evaluations.ee/Paywall/EvaluationsPaywall.vue b/packages/frontend/editor-ui/src/components/Evaluations.ee/Paywall/EvaluationsPaywall.vue new file mode 100644 index 0000000000..2832126340 --- /dev/null +++ b/packages/frontend/editor-ui/src/components/Evaluations.ee/Paywall/EvaluationsPaywall.vue @@ -0,0 +1,27 @@ + + + diff --git a/packages/frontend/editor-ui/src/components/Evaluations.ee/SetupWizard/SetupWizard.vue b/packages/frontend/editor-ui/src/components/Evaluations.ee/SetupWizard/SetupWizard.vue new file mode 100644 index 0000000000..bbfafe1bab --- /dev/null +++ b/packages/frontend/editor-ui/src/components/Evaluations.ee/SetupWizard/SetupWizard.vue @@ -0,0 +1,325 @@ + + + + + diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/composables/useMetricsChart.ts b/packages/frontend/editor-ui/src/components/Evaluations.ee/composables/useMetricsChart.ts 
similarity index 98% rename from packages/frontend/editor-ui/src/components/TestDefinition/composables/useMetricsChart.ts rename to packages/frontend/editor-ui/src/components/Evaluations.ee/composables/useMetricsChart.ts index 8fca3ff514..7337120060 100644 --- a/packages/frontend/editor-ui/src/components/TestDefinition/composables/useMetricsChart.ts +++ b/packages/frontend/editor-ui/src/components/Evaluations.ee/composables/useMetricsChart.ts @@ -1,5 +1,5 @@ import type { ChartData, ChartOptions } from 'chart.js'; -import type { TestRunRecord } from '@/api/testDefinition.ee'; +import type { TestRunRecord } from '@/api/evaluation.ee'; import dateFormat from 'dateformat'; import { useCssVar } from '@vueuse/core'; diff --git a/packages/frontend/editor-ui/src/components/Evaluations.ee/shared/StepHeader.vue b/packages/frontend/editor-ui/src/components/Evaluations.ee/shared/StepHeader.vue new file mode 100644 index 0000000000..ee9e8dae9d --- /dev/null +++ b/packages/frontend/editor-ui/src/components/Evaluations.ee/shared/StepHeader.vue @@ -0,0 +1,66 @@ + + + + + diff --git a/packages/frontend/editor-ui/src/components/Evaluations.ee/shared/StepIndicator.vue b/packages/frontend/editor-ui/src/components/Evaluations.ee/shared/StepIndicator.vue new file mode 100644 index 0000000000..384241b4ac --- /dev/null +++ b/packages/frontend/editor-ui/src/components/Evaluations.ee/shared/StepIndicator.vue @@ -0,0 +1,61 @@ + + + + + diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/shared/TableCell.vue b/packages/frontend/editor-ui/src/components/Evaluations.ee/shared/TableCell.vue similarity index 100% rename from packages/frontend/editor-ui/src/components/TestDefinition/shared/TableCell.vue rename to packages/frontend/editor-ui/src/components/Evaluations.ee/shared/TableCell.vue diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/shared/TableStatusCell.vue b/packages/frontend/editor-ui/src/components/Evaluations.ee/shared/TableStatusCell.vue 
similarity index 57% rename from packages/frontend/editor-ui/src/components/TestDefinition/shared/TableStatusCell.vue rename to packages/frontend/editor-ui/src/components/Evaluations.ee/shared/TableStatusCell.vue index 72877554fe..50f4e35d69 100644 --- a/packages/frontend/editor-ui/src/components/TestDefinition/shared/TableStatusCell.vue +++ b/packages/frontend/editor-ui/src/components/Evaluations.ee/shared/TableStatusCell.vue @@ -1,7 +1,7 @@ - - - - diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/DescriptionInput.vue b/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/DescriptionInput.vue deleted file mode 100644 index c858e04b0c..0000000000 --- a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/DescriptionInput.vue +++ /dev/null @@ -1,84 +0,0 @@ - - - - - diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/EvaluationStep.vue b/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/EvaluationStep.vue deleted file mode 100644 index f77397b847..0000000000 --- a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/EvaluationStep.vue +++ /dev/null @@ -1,219 +0,0 @@ - - - - - diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/NodesPinning.vue b/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/NodesPinning.vue deleted file mode 100644 index 60060ff12f..0000000000 --- a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/NodesPinning.vue +++ /dev/null @@ -1,247 +0,0 @@ - - - - - diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/TagsInput.vue b/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/TagsInput.vue deleted file mode 100644 index 8b5295d0fd..0000000000 --- a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/TagsInput.vue +++ /dev/null @@ -1,113 +0,0 
@@ - - - - - diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/TestNameInput.vue b/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/TestNameInput.vue deleted file mode 100644 index b0416bbb9e..0000000000 --- a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/TestNameInput.vue +++ /dev/null @@ -1,69 +0,0 @@ - - - - - diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/WorkflowSelector.vue b/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/WorkflowSelector.vue deleted file mode 100644 index 65d979cb90..0000000000 --- a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/WorkflowSelector.vue +++ /dev/null @@ -1,122 +0,0 @@ - - diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/sections/ConfigSection.vue b/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/sections/ConfigSection.vue deleted file mode 100644 index c02d02064d..0000000000 --- a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/sections/ConfigSection.vue +++ /dev/null @@ -1,216 +0,0 @@ - - - - - diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/tests/NodesPinning.test.ts b/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/tests/NodesPinning.test.ts deleted file mode 100644 index 09c9c1afaf..0000000000 --- a/packages/frontend/editor-ui/src/components/TestDefinition/EditDefinition/tests/NodesPinning.test.ts +++ /dev/null @@ -1,140 +0,0 @@ -import { waitFor } from '@testing-library/vue'; -import { createPinia, setActivePinia } from 'pinia'; -import { createTestingPinia } from '@pinia/testing'; -import NodesPinning from '../NodesPinning.vue'; -import { createComponentRenderer } from '@/__tests__/render'; -import { useWorkflowsStore } from '@/stores/workflows.store'; -import { useNodeTypesStore } from 
'@/stores/nodeTypes.store'; - -import { - createTestNode, - createTestWorkflow, - createTestWorkflowObject, - mockNodeTypeDescription, -} from '@/__tests__/mocks'; -import { mockedStore } from '@/__tests__/utils'; -import { NodeConnectionTypes } from 'n8n-workflow'; -import { SET_NODE_TYPE } from '@/constants'; - -vi.mock('vue-router', () => { - const push = vi.fn(); - return { - useRouter: () => ({ - push, - }), - useRoute: () => ({ - params: { - name: 'test-workflow', - testId: 'test-123', - }, - }), - RouterLink: { - template: '', - }, - }; -}); - -const renderComponent = createComponentRenderer(NodesPinning, { - props: { - modelValue: [{ id: '1', name: 'Node 1' }], - }, - global: { - plugins: [createTestingPinia()], - }, -}); - -describe('NodesPinning', () => { - const workflowsStore = mockedStore(useWorkflowsStore); - const nodes = [ - createTestNode({ id: '1', name: 'Node 1', type: SET_NODE_TYPE }), - createTestNode({ id: '2', name: 'Node 2', type: SET_NODE_TYPE }), - ]; - - const nodeTypesStore = mockedStore(useNodeTypesStore); - const nodeTypeDescription = mockNodeTypeDescription({ - name: SET_NODE_TYPE, - inputs: [NodeConnectionTypes.Main], - outputs: [NodeConnectionTypes.Main], - }); - nodeTypesStore.nodeTypes = { - node: { 1: nodeTypeDescription }, - }; - - nodeTypesStore.getNodeType = vi.fn().mockReturnValue(nodeTypeDescription); - const workflow = createTestWorkflow({ - id: 'test-workflow', - name: 'Test Workflow', - nodes, - connections: {}, - }); - - const workflowObject = createTestWorkflowObject(workflow); - - workflowsStore.getWorkflowById = vi.fn().mockReturnValue(workflow); - workflowsStore.getCurrentWorkflow = vi.fn().mockReturnValue(workflowObject); - beforeEach(() => { - const pinia = createPinia(); - setActivePinia(pinia); - - nodeTypesStore.setNodeTypes([nodeTypeDescription]); - }); - - afterEach(() => { - vi.clearAllMocks(); - }); - - it('should render workflow nodes', async () => { - const { container } = renderComponent(); - - await 
waitFor(() => { - expect(container.querySelectorAll('.vue-flow__node')).toHaveLength(2); - }); - - expect(container.querySelector('[data-node-name="Node 1"]')).toBeInTheDocument(); - expect(container.querySelector('[data-node-name="Node 2"]')).toBeInTheDocument(); - }); - - it('should update UI when pinning/unpinning nodes', async () => { - const { container, getAllByTestId } = renderComponent(); - - await waitFor(() => { - expect(container.querySelector('[data-node-name="Node 1"]')).toBeInTheDocument(); - }); - - const buttons = getAllByTestId('node-pin-button'); - expect(buttons.length).toBe(2); - - expect(buttons[0]).toHaveTextContent('Unpin'); - expect(buttons[1]).toHaveTextContent('Pin'); - }); - - it('should emit update:modelValue when pinning nodes', async () => { - const { container, emitted, getAllByTestId } = renderComponent(); - - await waitFor(() => { - expect(container.querySelector('[data-node-name="Node 1"]')).toBeInTheDocument(); - }); - const pinButton = getAllByTestId('node-pin-button')[1]; - pinButton?.click(); - - expect(emitted('update:modelValue')).toBeTruthy(); - expect(emitted('update:modelValue')[0]).toEqual([ - [ - { id: '1', name: 'Node 1' }, - { id: '2', name: 'Node 2' }, - ], - ]); - }); - it('should emit update:modelValue when unpinning nodes', async () => { - const { container, emitted, getAllByTestId } = renderComponent(); - - await waitFor(() => { - expect(container.querySelector('[data-node-name="Node 1"]')).toBeInTheDocument(); - }); - const pinButton = getAllByTestId('node-pin-button')[0]; - pinButton?.click(); - - expect(emitted('update:modelValue')).toBeTruthy(); - expect(emitted('update:modelValue')[0]).toEqual([[]]); - }); -}); diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/ListDefinition/EmptyState.vue b/packages/frontend/editor-ui/src/components/TestDefinition/ListDefinition/EmptyState.vue deleted file mode 100644 index 37fc00c1ab..0000000000 --- 
a/packages/frontend/editor-ui/src/components/TestDefinition/ListDefinition/EmptyState.vue +++ /dev/null @@ -1,119 +0,0 @@ - - - - - diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/ListDefinition/TestItem.vue b/packages/frontend/editor-ui/src/components/TestDefinition/ListDefinition/TestItem.vue deleted file mode 100644 index 2649c88ee0..0000000000 --- a/packages/frontend/editor-ui/src/components/TestDefinition/ListDefinition/TestItem.vue +++ /dev/null @@ -1,180 +0,0 @@ - - - - - diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/ListRuns/TestRunsTable.vue b/packages/frontend/editor-ui/src/components/TestDefinition/ListRuns/TestRunsTable.vue deleted file mode 100644 index 86d2acbca7..0000000000 --- a/packages/frontend/editor-ui/src/components/TestDefinition/ListRuns/TestRunsTable.vue +++ /dev/null @@ -1,109 +0,0 @@ - - - - - diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/composables/useTestDefinitionForm.ts b/packages/frontend/editor-ui/src/components/TestDefinition/composables/useTestDefinitionForm.ts deleted file mode 100644 index 5e14a6996b..0000000000 --- a/packages/frontend/editor-ui/src/components/TestDefinition/composables/useTestDefinitionForm.ts +++ /dev/null @@ -1,206 +0,0 @@ -import { ref, computed } from 'vue'; -import type { ComponentPublicInstance, ComputedRef } from 'vue'; -import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee'; -import type AnnotationTagsDropdownEe from '@/components/AnnotationTagsDropdown.ee.vue'; -import type { N8nInput } from '@n8n/design-system'; -import type { UpdateTestDefinitionParams } from '@/api/testDefinition.ee'; -import type { EditableField, EditableFormState, EvaluationFormState } from '../types'; - -type FormRefs = { - nameInput: ComponentPublicInstance; - tagsInput: ComponentPublicInstance; -}; - -export function useTestDefinitionForm() { - const evaluationsStore = useTestDefinitionStore(); - - // State initialization - const state = 
ref({ - name: { - value: `My Test ${evaluationsStore.allTestDefinitions.length + 1}`, - tempValue: '', - isEditing: false, - }, - tags: { - value: [], - tempValue: [], - isEditing: false, - }, - description: { - value: '', - tempValue: '', - isEditing: false, - }, - evaluationWorkflow: { - mode: 'list', - value: '', - __rl: true, - }, - mockedNodes: [], - }); - - const isSaving = ref(false); - const fields = ref({} as FormRefs); - - const editableFields: ComputedRef<{ - name: EditableField; - tags: EditableField; - description: EditableField; - }> = computed(() => ({ - name: state.value.name, - tags: state.value.tags, - description: state.value.description, - })); - - /** - * Load test data including metrics. - */ - const loadTestData = async (testId: string, workflowId: string) => { - try { - await evaluationsStore.fetchAll({ force: true, workflowId }); - const testDefinition = evaluationsStore.testDefinitionsById[testId]; - - if (testDefinition) { - state.value.description = { - value: testDefinition.description ?? '', - isEditing: false, - tempValue: '', - }; - state.value.name = { - value: testDefinition.name ?? '', - isEditing: false, - tempValue: '', - }; - state.value.tags = { - isEditing: false, - value: testDefinition.annotationTagId ? [testDefinition.annotationTagId] : [], - tempValue: [], - }; - state.value.evaluationWorkflow = { - mode: 'list', - value: testDefinition.evaluationWorkflowId ?? '', - __rl: true, - }; - state.value.mockedNodes = testDefinition.mockedNodes ?? 
[]; - evaluationsStore.updateRunFieldIssues(testDefinition.id); - } - } catch (error) { - console.error('Failed to load test data', error); - } - }; - - const createTest = async (workflowId: string) => { - if (isSaving.value) return; - - isSaving.value = true; - - try { - const params = { - name: state.value.name.value, - workflowId, - description: state.value.description.value, - }; - return await evaluationsStore.create(params); - } finally { - isSaving.value = false; - } - }; - - const updateTest = async (testId: string) => { - if (isSaving.value) return; - - isSaving.value = true; - - try { - if (!testId) { - throw new Error('Test ID is required for updating a test'); - } - - const params: UpdateTestDefinitionParams = { - name: state.value.name.value, - description: state.value.description.value, - }; - - if (state.value.evaluationWorkflow.value) { - params.evaluationWorkflowId = state.value.evaluationWorkflow.value.toString(); - } - - const annotationTagId = state.value.tags.value[0]; - if (annotationTagId) { - params.annotationTagId = annotationTagId; - } - params.mockedNodes = state.value.mockedNodes; - - const response = await evaluationsStore.update({ ...params, id: testId }); - return response; - } finally { - isSaving.value = false; - } - }; - - /** - * Start editing an editable field by copying `value` to `tempValue`. - */ - function startEditing(field: T) { - const fieldObj = editableFields.value[field]; - if (fieldObj.isEditing) { - // Already editing, do nothing - return; - } - - if (Array.isArray(fieldObj.value)) { - fieldObj.tempValue = [...fieldObj.value]; - } else { - fieldObj.tempValue = fieldObj.value; - } - fieldObj.isEditing = true; - } - /** - * Save changes by copying `tempValue` back into `value`. - */ - function saveChanges(field: T) { - const fieldObj = editableFields.value[field]; - fieldObj.value = Array.isArray(fieldObj.tempValue) - ? 
[...fieldObj.tempValue] - : fieldObj.tempValue; - fieldObj.isEditing = false; - } - - /** - * Cancel editing and revert `tempValue` from `value`. - */ - function cancelEditing(field: T) { - const fieldObj = editableFields.value[field]; - if (Array.isArray(fieldObj.value)) { - fieldObj.tempValue = [...fieldObj.value]; - } else { - fieldObj.tempValue = fieldObj.value; - } - fieldObj.isEditing = false; - } - - /** - * Handle keyboard events during editing. - */ - function handleKeydown(event: KeyboardEvent, field: T) { - if (event.key === 'Escape') { - cancelEditing(field); - } else if (event.key === 'Enter' && !event.shiftKey) { - event.preventDefault(); - saveChanges(field); - } - } - - return { - state, - fields, - isSaving: computed(() => isSaving.value), - loadTestData, - createTest, - updateTest, - startEditing, - saveChanges, - cancelEditing, - handleKeydown, - }; -} diff --git a/packages/frontend/editor-ui/src/components/TestDefinition/tests/useTestDefinitionForm.test.ts b/packages/frontend/editor-ui/src/components/TestDefinition/tests/useTestDefinitionForm.test.ts deleted file mode 100644 index b81a7dec7f..0000000000 --- a/packages/frontend/editor-ui/src/components/TestDefinition/tests/useTestDefinitionForm.test.ts +++ /dev/null @@ -1,262 +0,0 @@ -import { setActivePinia } from 'pinia'; -import { createTestingPinia } from '@pinia/testing'; -import { useTestDefinitionForm } from '../composables/useTestDefinitionForm'; -import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee'; -import { mockedStore } from '@/__tests__/utils'; -import type { TestDefinitionRecord } from '@/api/testDefinition.ee'; - -const TEST_DEF_A: TestDefinitionRecord = { - id: '1', - name: 'Test Definition A', - description: 'Description A', - evaluationWorkflowId: '456', - workflowId: '123', - annotationTagId: '789', - annotationTag: null, - createdAt: '2023-01-01T00:00:00.000Z', -}; -const TEST_DEF_B: TestDefinitionRecord = { - id: '2', - name: 'Test Definition B', - 
workflowId: '123', - description: 'Description B', - annotationTag: null, - createdAt: '2023-01-01T00:00:00.000Z', -}; -const TEST_DEF_NEW: TestDefinitionRecord = { - id: '3', - workflowId: '123', - name: 'New Test Definition', - description: 'New Description', - annotationTag: null, - createdAt: '2023-01-01T00:00:00.000Z', -}; - -beforeEach(() => { - const pinia = createTestingPinia(); - setActivePinia(pinia); -}); - -afterEach(() => { - vi.clearAllMocks(); -}); - -describe('useTestDefinitionForm', () => { - it('should initialize with default props', () => { - const { state } = useTestDefinitionForm(); - - expect(state.value.description.value).toBe(''); - expect(state.value.name.value).toContain('My Test'); - expect(state.value.tags.value).toEqual([]); - expect(state.value.evaluationWorkflow.value).toBe(''); - }); - - it('should load test data', async () => { - const { loadTestData, state } = useTestDefinitionForm(); - const fetchSpy = vi.spyOn(useTestDefinitionStore(), 'fetchAll'); - const evaluationsStore = mockedStore(useTestDefinitionStore); - - evaluationsStore.testDefinitionsById = { - [TEST_DEF_A.id]: TEST_DEF_A, - [TEST_DEF_B.id]: TEST_DEF_B, - }; - - await loadTestData(TEST_DEF_A.id, '123'); - expect(fetchSpy).toBeCalled(); - expect(state.value.name.value).toEqual(TEST_DEF_A.name); - expect(state.value.description.value).toEqual(TEST_DEF_A.description); - expect(state.value.tags.value).toEqual([TEST_DEF_A.annotationTagId]); - expect(state.value.evaluationWorkflow.value).toEqual(TEST_DEF_A.evaluationWorkflowId); - }); - - it('should gracefully handle loadTestData when no test definition found', async () => { - const { loadTestData, state } = useTestDefinitionForm(); - const fetchSpy = vi.spyOn(useTestDefinitionStore(), 'fetchAll'); - const evaluationsStore = mockedStore(useTestDefinitionStore); - - evaluationsStore.testDefinitionsById = {}; - - await loadTestData('unknown-id', '123'); - expect(fetchSpy).toBeCalled(); - // Should remain unchanged since no 
definition found - expect(state.value.description.value).toBe(''); - expect(state.value.name.value).toContain('My Test'); - expect(state.value.tags.value).toEqual([]); - }); - - it('should handle errors while loading test data', async () => { - const { loadTestData } = useTestDefinitionForm(); - const fetchSpy = vi - .spyOn(useTestDefinitionStore(), 'fetchAll') - .mockRejectedValue(new Error('Fetch Failed')); - const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); - - await loadTestData(TEST_DEF_A.id, '123'); - expect(fetchSpy).toBeCalled(); - expect(consoleErrorSpy).toBeCalledWith('Failed to load test data', expect.any(Error)); - consoleErrorSpy.mockRestore(); - }); - - it('should save a new test', async () => { - const { createTest, state } = useTestDefinitionForm(); - const createSpy = vi.spyOn(useTestDefinitionStore(), 'create').mockResolvedValue(TEST_DEF_NEW); - - state.value.name.value = TEST_DEF_NEW.name; - state.value.description.value = TEST_DEF_NEW.description ?? 
''; - - const newTest = await createTest('123'); - expect(createSpy).toBeCalledWith({ - name: TEST_DEF_NEW.name, - description: TEST_DEF_NEW.description, - workflowId: '123', - }); - expect(newTest).toEqual(TEST_DEF_NEW); - }); - - it('should handle errors when creating a new test', async () => { - const { createTest } = useTestDefinitionForm(); - const createSpy = vi - .spyOn(useTestDefinitionStore(), 'create') - .mockRejectedValue(new Error('Create Failed')); - - await expect(createTest('123')).rejects.toThrow('Create Failed'); - expect(createSpy).toBeCalled(); - }); - - it('should update an existing test', async () => { - const { updateTest, state } = useTestDefinitionForm(); - const updatedBTest = { - ...TEST_DEF_B, - updatedAt: '2022-01-01T00:00:00.000Z', - createdAt: '2022-01-01T00:00:00.000Z', - }; - const updateSpy = vi.spyOn(useTestDefinitionStore(), 'update').mockResolvedValue(updatedBTest); - - state.value.name.value = TEST_DEF_B.name; - state.value.description.value = TEST_DEF_B.description ?? 
''; - - const updatedTest = await updateTest(TEST_DEF_A.id); - expect(updateSpy).toBeCalledWith({ - id: TEST_DEF_A.id, - name: TEST_DEF_B.name, - description: TEST_DEF_B.description, - mockedNodes: [], - }); - expect(updatedTest).toEqual(updatedBTest); - }); - - it('should throw an error if no testId is provided when updating a test', async () => { - const { updateTest } = useTestDefinitionForm(); - await expect(updateTest('')).rejects.toThrow('Test ID is required for updating a test'); - }); - - it('should handle errors when updating a test', async () => { - const { updateTest, state } = useTestDefinitionForm(); - const updateSpy = vi - .spyOn(useTestDefinitionStore(), 'update') - .mockRejectedValue(new Error('Update Failed')); - - state.value.name.value = 'Test'; - state.value.description.value = 'Some description'; - - await expect(updateTest(TEST_DEF_A.id)).rejects.toThrow('Update Failed'); - expect(updateSpy).toBeCalled(); - }); - - it('should start editing a field', () => { - const { state, startEditing } = useTestDefinitionForm(); - - startEditing('name'); - expect(state.value.name.isEditing).toBe(true); - expect(state.value.name.tempValue).toBe(state.value.name.value); - - startEditing('tags'); - expect(state.value.tags.isEditing).toBe(true); - expect(state.value.tags.tempValue).toEqual(state.value.tags.value); - }); - - it('should do nothing if startEditing is called while already editing', () => { - const { state, startEditing } = useTestDefinitionForm(); - state.value.name.isEditing = true; - state.value.name.tempValue = 'Original Name'; - - startEditing('name'); - // Should remain unchanged because it was already editing - expect(state.value.name.isEditing).toBe(true); - expect(state.value.name.tempValue).toBe('Original Name'); - }); - - it('should save changes to a field', () => { - const { state, startEditing, saveChanges } = useTestDefinitionForm(); - - // Name - startEditing('name'); - state.value.name.tempValue = 'New Name'; - saveChanges('name'); 
- expect(state.value.name.isEditing).toBe(false); - expect(state.value.name.value).toBe('New Name'); - - // Tags - startEditing('tags'); - state.value.tags.tempValue = ['123']; - saveChanges('tags'); - expect(state.value.tags.isEditing).toBe(false); - expect(state.value.tags.value).toEqual(['123']); - }); - - it('should cancel editing a field', () => { - const { state, startEditing, cancelEditing } = useTestDefinitionForm(); - - const originalName = state.value.name.value; - startEditing('name'); - state.value.name.tempValue = 'New Name'; - cancelEditing('name'); - expect(state.value.name.isEditing).toBe(false); - expect(state.value.name.tempValue).toBe(originalName); - - const originalTags = [...state.value.tags.value]; - startEditing('tags'); - state.value.tags.tempValue = ['123']; - cancelEditing('tags'); - expect(state.value.tags.isEditing).toBe(false); - expect(state.value.tags.tempValue).toEqual(originalTags); - }); - - it('should handle keydown - Escape', () => { - const { state, startEditing, handleKeydown } = useTestDefinitionForm(); - - startEditing('name'); - handleKeydown(new KeyboardEvent('keydown', { key: 'Escape' }), 'name'); - expect(state.value.name.isEditing).toBe(false); - - startEditing('tags'); - handleKeydown(new KeyboardEvent('keydown', { key: 'Escape' }), 'tags'); - expect(state.value.tags.isEditing).toBe(false); - }); - - it('should handle keydown - Enter', () => { - const { state, startEditing, handleKeydown } = useTestDefinitionForm(); - - startEditing('name'); - state.value.name.tempValue = 'New Name'; - handleKeydown(new KeyboardEvent('keydown', { key: 'Enter' }), 'name'); - expect(state.value.name.isEditing).toBe(false); - expect(state.value.name.value).toBe('New Name'); - - startEditing('tags'); - state.value.tags.tempValue = ['123']; - handleKeydown(new KeyboardEvent('keydown', { key: 'Enter' }), 'tags'); - expect(state.value.tags.isEditing).toBe(false); - expect(state.value.tags.value).toEqual(['123']); - }); - - it('should not save 
changes when shift+Enter is pressed', () => { - const { state, startEditing, handleKeydown } = useTestDefinitionForm(); - - startEditing('name'); - state.value.name.tempValue = 'New Name With Shift'; - handleKeydown(new KeyboardEvent('keydown', { key: 'Enter', shiftKey: true }), 'name'); - expect(state.value.name.isEditing).toBe(true); - expect(state.value.name.value).not.toBe('New Name With Shift'); - }); -}); diff --git a/packages/frontend/editor-ui/src/components/executions/workflow/WorkflowExecutionsCard.vue b/packages/frontend/editor-ui/src/components/executions/workflow/WorkflowExecutionsCard.vue index cb65aa94af..aa0e97f7ad 100644 --- a/packages/frontend/editor-ui/src/components/executions/workflow/WorkflowExecutionsCard.vue +++ b/packages/frontend/editor-ui/src/components/executions/workflow/WorkflowExecutionsCard.vue @@ -176,7 +176,7 @@ function onRetryMenuItemSelect(action: string): void { - + diff --git a/packages/frontend/editor-ui/src/composables/usePushConnection/handlers/executionFinished.ts b/packages/frontend/editor-ui/src/composables/usePushConnection/handlers/executionFinished.ts index b1778a8c9b..c3c95810f2 100644 --- a/packages/frontend/editor-ui/src/composables/usePushConnection/handlers/executionFinished.ts +++ b/packages/frontend/editor-ui/src/composables/usePushConnection/handlers/executionFinished.ts @@ -18,7 +18,7 @@ import { parse } from 'flatted'; import { useToast } from '@/composables/useToast'; import type { useRouter } from 'vue-router'; import { useI18n } from '@/composables/useI18n'; -import { TelemetryHelpers } from 'n8n-workflow'; +import { TelemetryHelpers, EVALUATION_TRIGGER_NODE_TYPE } from 'n8n-workflow'; import type { IWorkflowBase, ExpressionError, IDataObject, IRunExecutionData } from 'n8n-workflow'; import { codeNodeEditorEventBus, globalLinkActionsEventBus } from '@/event-bus'; import { getTriggerNodeServiceName } from '@/utils/nodeTypesUtils'; @@ -94,6 +94,34 @@ export async function executionFinished( } } + // 
Implicit looping: This will re-trigger the evaluation trigger if it exists on a successful execution of the workflow. + if (execution.status === 'success' && execution.data?.startData?.destinationNode === undefined) { + // check if we have an evaluation trigger in our workflow and whether it has any run data + const evalTrigger = execution.workflowData.nodes.find( + (node) => node.type === EVALUATION_TRIGGER_NODE_TYPE, + ); + const triggerRunData = evalTrigger + ? execution?.data?.resultData?.runData[evalTrigger.name] + : undefined; + + if (evalTrigger && triggerRunData !== undefined) { + const mainData = triggerRunData[0]?.data?.main[0]; + const rowsLeft = mainData ? (mainData[0]?.json?._rowsLeft as number) : 0; + + if (rowsLeft && rowsLeft > 0) { + // Find the button that belongs to the evaluation trigger, and click it. + const testId = `execute-workflow-button-${evalTrigger.name}`; + + setTimeout(() => { + const button = Array.from(document.querySelectorAll('[data-test-id]')).filter((x) => + (x as HTMLElement)?.dataset?.testId?.startsWith(testId), + )[0]; + (button as HTMLElement)?.click(); + }, 2); + } + } + } + const runExecutionData = getRunExecutionData(execution); uiStore.setProcessingExecutionResults(false); diff --git a/packages/frontend/editor-ui/src/constants.ts b/packages/frontend/editor-ui/src/constants.ts index 8a5b3d19e1..7d0e3f3f1d 100644 --- a/packages/frontend/editor-ui/src/constants.ts +++ b/packages/frontend/editor-ui/src/constants.ts @@ -217,7 +217,6 @@ export const SLACK_TRIGGER_NODE_TYPE = 'n8n-nodes-base.slackTrigger'; export const TELEGRAM_TRIGGER_NODE_TYPE = 'n8n-nodes-base.telegramTrigger'; export const FACEBOOK_LEAD_ADS_TRIGGER_NODE_TYPE = 'n8n-nodes-base.facebookLeadAdsTrigger'; export const RESPOND_TO_WEBHOOK_NODE_TYPE = 'n8n-nodes-base.respondToWebhook'; -export const EVALUATION_TRIGGER_NODE_TYPE = 'n8n-nodes-base.evaluationTrigger'; export const CREDENTIAL_ONLY_NODE_PREFIX = 'n8n-creds-base'; export const 
CREDENTIAL_ONLY_HTTP_NODE_VERSION = 4.1; @@ -279,6 +278,8 @@ export const NODE_CREATOR_OPEN_SOURCES: Record< NODE_CONNECTION_DROP: 'node_connection_drop', NOTICE_ERROR_MESSAGE: 'notice_error_message', CONTEXT_MENU: 'context_menu', + ADD_EVALUATION_NODE_BUTTON: 'add_evaluation_node_button', + ADD_EVALUATION_TRIGGER_BUTTON: 'add_evaluation_trigger_button', '': '', }; export const CORE_NODES_CATEGORY = 'Core Nodes'; @@ -310,6 +311,7 @@ export const AI_CATEGORY_TEXT_SPLITTERS = 'Text Splitters'; export const AI_CATEGORY_OTHER_TOOLS = 'Other Tools'; export const AI_CATEGORY_ROOT_NODES = 'Root Nodes'; export const AI_CATEGORY_MCP_NODES = 'Model Context Protocol'; +export const AI_EVALUATION = 'Evaluation'; export const AI_UNCATEGORIZED_CATEGORY = 'Miscellaneous'; export const AI_CODE_TOOL_LANGCHAIN_NODE_TYPE = '@n8n/n8n-nodes-langchain.toolCode'; export const AI_WORKFLOW_TOOL_LANGCHAIN_NODE_TYPE = '@n8n/n8n-nodes-langchain.toolWorkflow'; @@ -547,11 +549,9 @@ export const enum VIEWS { COMMUNITY_NODES = 'CommunityNodes', WORKFLOWS = 'WorkflowsView', WORKFLOW_EXECUTIONS = 'WorkflowExecutions', - TEST_DEFINITION = 'TestDefinition', - TEST_DEFINITION_EDIT = 'TestDefinitionEdit', - TEST_DEFINITION_RUNS_COMPARE = 'TestDefinitionRunsCompare', - TEST_DEFINITION_RUNS_DETAIL = 'TestDefinitionRunsDetail', - NEW_TEST_DEFINITION = 'NewTestDefinition', + EVALUATION = 'Evaluation', + EVALUATION_EDIT = 'EvaluationEdit', + EVALUATION_RUNS_DETAIL = 'EvaluationRunsDetail', USAGE = 'Usage', LOG_STREAMING_SETTINGS = 'LogStreamingSettingsView', SSO_SETTINGS = 'SSoSettings', @@ -659,7 +659,7 @@ export const enum MAIN_HEADER_TABS { WORKFLOW = 'workflow', EXECUTIONS = 'executions', SETTINGS = 'settings', - TEST_DEFINITION = 'testDefinition', + EVALUATION = 'evaluation', } export const CURL_IMPORT_NOT_SUPPORTED_PROTOCOLS = [ 'ftp', @@ -717,12 +717,6 @@ export const KEEP_AUTH_IN_NDV_FOR_NODES = [ export const MAIN_AUTH_FIELD_NAME = 'authentication'; export const NODE_RESOURCE_FIELD_NAME = 
'resource'; -export const EVALUATION_TRIGGER = { - name: '031-evaluation-trigger', - control: 'control', - variant: 'variant', -}; - export const EASY_AI_WORKFLOW_EXPERIMENT = { name: '026_easy_ai_workflow', control: 'control', diff --git a/packages/frontend/editor-ui/src/plugins/i18n/locales/en.json b/packages/frontend/editor-ui/src/plugins/i18n/locales/en.json index 3cffb8978e..2539086eaa 100644 --- a/packages/frontend/editor-ui/src/plugins/i18n/locales/en.json +++ b/packages/frontend/editor-ui/src/plugins/i18n/locales/en.json @@ -58,7 +58,7 @@ "generic.executions": "Executions", "generic.tag_plural": "Tags", "generic.tag": "Tag | {count} Tags", - "generic.tests": "Tests", + "generic.tests": "Evaluations", "generic.optional": "optional", "generic.or": "or", "generic.clickToCopy": "Click to copy", @@ -2994,142 +2994,115 @@ "communityPlusModal.notice": "Included features may change, but once unlocked, you'll keep them forever.", "executeWorkflowTrigger.createNewSubworkflow": "Create a Sub-Workflow in {projectName}", "executeWorkflowTrigger.createNewSubworkflow.noProject": "Create a New Sub-Workflow", - "testDefinition.edit.descriptionPlaceholder": "Enter test description", - "testDefinition.edit.showConfig": "Show config", - "testDefinition.edit.hideConfig": "Hide config", - "testDefinition.edit.backButtonTitle": "Back to Workflow Evaluation", - "testDefinition.edit.namePlaceholder": "Enter test name", - "testDefinition.edit.selectTag": "Select tag...", - "testDefinition.edit.tagsHelpText": "Executions with this tag will be added as test cases to this test.", - "testDefinition.edit.workflowSelectorLabel": "Use a second workflow to make the comparison", - "testDefinition.edit.workflowSelectorDisplayName": "Workflow", - "testDefinition.edit.workflowSelectorTitle": "Use a second workflow to make the comparison", - "testDefinition.edit.workflowSelectorHelpText": "This workflow will be called once for each test case.", - "testDefinition.edit.updateTest": "Update test", 
- "testDefinition.edit.saveTest": "Save test", - "testDefinition.edit.runTest": "Run test", - "testDefinition.edit.testSaved": "Test saved", - "testDefinition.edit.testSaveFailed": "Failed to save test", - "testDefinition.edit.description": "Description", - "testDefinition.edit.description.description": "Add details about what this test evaluates and what success looks like", - "testDefinition.edit.pinNodes.noNodes.title": "No nodes to pin", - "testDefinition.edit.pinNodes.noNodes.description": "Your workflow needs to have at least one node to run a test", - "testDefinition.edit.tagName": "Tag name", - "testDefinition.edit.step.intro": "When running a test", - "testDefinition.edit.step.executions": "1. Fetch benchmark executions | 1. Fetch {count} benchmark execution | 1. Fetch {count} benchmark executions", - "testDefinition.edit.step.tag": "Any past executions tagged {tag} are fetched", - "testDefinition.edit.step.tag.placeholder": "Enter new tag name", - "testDefinition.edit.step.tag.validation.required": "Tag name is required", - "testDefinition.edit.step.tag.validation.tooLong": "Tag name is too long", - "testDefinition.edit.step.executions.tooltip": "Past executions are used as benchmark data. Each one will be re-executed during the test to check whether performance has changed.", - "testDefinition.edit.step.mockedNodes": "2. Mock nodes |2. Mock {count} node |2. Mock {count} nodes", - "testDefinition.edit.step.nodes.tooltip": "Mocked nodes have their data replayed rather than being re-executed. Do this to avoid calling external services, or save time executing.", - "testDefinition.edit.step.reRunExecutions": "3. Re-run executions", - "testDefinition.edit.step.reRunExecutions.tooltip": "Each past execution is re-run using the latest version of the workflow being tested", - "testDefinition.edit.step.compareExecutions": "4. 
Compare each past and new execution", - "testDefinition.edit.step.compareExecutions.tooltip": "Each past execution is compared with its new equivalent to check how similar they are. This is done using a separate evaluation workflow: it receives the two execution versions as input, and outputs metrics.", - "testDefinition.edit.step.collapse": "Collapse", - "testDefinition.edit.step.configure": "Configure", - "testDefinition.edit.selectNodes": "Pin nodes to mock them", - "testDefinition.edit.modal.description": "Choose which past data to keep when re-running the execution(s). Any mocked node will be replayed rather than re-executed. The trigger is always mocked.", - "testDefinition.edit.runExecution": "Run execution", - "testDefinition.edit.pastRuns": "Past runs", - "testDefinition.edit.pastRuns.total": "No runs | Past run | Past runs", - "testDefinition.edit.nodesPinning.pinButtonTooltip": "Use benchmark data for this node during evaluation execution", - "testDefinition.edit.nodesPinning.pinButtonTooltip.pinned": "This node will not be re-executed", - "testDefinition.edit.nodesPinning.triggerTooltip": "Trigger nodes are mocked by default", - "testDefinition.edit.saving": "Saving...", - "testDefinition.edit.saved": "Test saved", - "testDefinition.list.testDeleted": "Test deleted", - "testDefinition.list.tests": "Tests", - "testDefinition.list.evaluations": "Evaluation", - "testDefinition.list.unitTests.badge": "Coming soon", - "testDefinition.list.unitTests.title": "Unit test", - "testDefinition.list.unitTests.description": "Validate workflow logic by checking for specific conditions", - "testDefinition.list.unitTests.cta": "Register interest", - "testDefinition.list.createNew": "Create new evaluation", - "testDefinition.list.runAll": "Run all evaluations", - "testDefinition.list.actionDescription": "Measure changes in output by comparing results over time (for AI workflows)", - "testDefinition.list.actionButton": "Create an Evaluation", - 
"testDefinition.list.actionButton.unregistered": "Unlock evaluation", - "testDefinition.list.actionDescription.registered": "Your plan allows one evaluation", - "testDefinition.list.actionDescription.unregistered": "Unlock a free test when you register", - "testDefinition.list.actionDescription.atLimit": "You've reached your evaluation limit, upgrade to add more", - "testDefinition.list.testRuns": "No test runs | {count} test run | {count} test runs", - "testDefinition.list.lastRun": "Ran", - "testDefinition.list.running": "Running", - "testDefinition.list.errorRate": "Error rate: {errorRate}", - "testDefinition.list.testStartError": "Failed to start test run", - "testDefinition.list.testStarted": "Test run started", - "testDefinition.list.testCancelled": "Test run cancelled", - "testDefinition.list.loadError": "Failed to load tests", - "testDefinition.list.item.tests": "No test cases | {count} test case | {count} test cases", - "testDefinition.list.item.missingFields": "No fields missing | {count} field missing| {count} fields missing", - "testDefinition.listRuns.status.new": "New", - "testDefinition.listRuns.status.running": "Running", - "testDefinition.listRuns.status.evaluating": "Evaluating", - "testDefinition.listRuns.status.completed": "Completed", - "testDefinition.listRuns.status.cancelled": "Cancelled", - "testDefinition.listRuns.status.error": "Error", - "testDefinition.listRuns.status.success": "Success", - "testDefinition.listRuns.status.warning": "Warning", - "testDefinition.listRuns.metricsOverTime": "Metrics over time", - "testDefinition.listRuns.status": "Status", - "testDefinition.listRuns.runNumber": "Run", - "testDefinition.listRuns.runDate": "Run date", - "testDefinition.listRuns.runStatus": "Run status", - "testDefinition.listRuns.noRuns": "No test runs", - "testDefinition.listRuns.noRuns.description": "Run a test to see the results here", - "testDefinition.listRuns.deleteRuns": "No runs to delete | Delete {count} run | Delete {count} runs", - 
"testDefinition.listRuns.noRuns.button": "Run Test", - "testDefinition.listRuns.error.noPastExecutions": "No executions added to the specified tag", - "testDefinition.listRuns.error.evaluationWorkflowNotFound": "Selected evaluation workflow does not exist. {link}.", - "testDefinition.listRuns.error.evaluationWorkflowNotFound.solution": "Fix test configuration", - "testDefinition.runDetail.ranAt": "Ran at", - "testDefinition.runDetail.testCase": "Test case", - "testDefinition.runDetail.testCase.id": "Test case ID", - "testDefinition.runDetail.testCase.status": "Test case status", - "testDefinition.runDetail.totalCases": "Total cases", - "testDefinition.runDetail.error.mockedNodeMissing": "Output for a mocked node does not exist in benchmark execution.{link}.", - "testDefinition.runDetail.error.mockedNodeMissing.solution": "Fix test configuration", - "testDefinition.runDetail.error.executionFailed": "Failed to execute workflow with benchmark trigger. {link}.", - "testDefinition.runDetail.error.executionFailed.solution": "View execution", - "testDefinition.runDetail.error.evaluationFailed": "Failed to execute the evaluation workflow. {link}.", - "testDefinition.runDetail.error.evaluationFailed.solution": "View evaluation execution", - "testDefinition.runDetail.error.triggerNoLongerExists": "Trigger in benchmark execution no longer exists in workflow.{link}.", - "testDefinition.runDetail.error.triggerNoLongerExists.solution": "View benchmark", - "testDefinition.runDetail.error.invalidMetrics": "Evaluation workflow returned invalid metrics. Only numeric values are expected. View evaluation execution. 
{link}.", - "testDefinition.runDetail.error.invalidMetrics.solution": "View evaluation execution", - "testDefinition.runTest": "Run Test", - "testDefinition.cancelTestRun": "Cancel Test Run", - "testDefinition.notImplemented": "This feature is not implemented yet!", - "testDefinition.viewDetails": "View Details", - "testDefinition.editTest": "Edit Test", - "testDefinition.deleteTest": "Delete Test", - "testDefinition.deleteTest.warning": "The test and all associated runs will be removed. This cannot be undone", - "testDefinition.testIsRunning": "Test is running. Please wait for it to finish.", - "testDefinition.completeConfig": "Complete the configuration below to run the test:", - "testDefinition.configError.noEvaluationTag": "No evaluation tag set", - "testDefinition.configError.noExecutionsAddedToTag": "No executions added to this tag", - "testDefinition.configError.noEvaluationWorkflow": "No evaluation workflow set", - "testDefinition.configError.noMetrics": "No metrics set", - "testDefinition.workflowInput.subworkflowName": "Evaluation workflow for {name}", - "testDefinition.workflowInput.subworkflowName.default": "My Evaluation Sub-Workflow", - "testDefinition.executions.addTo": "Add to Test", - "testDefinition.executions.addTo.new": "Add to Test", - "testDefinition.executions.addTo.existing": "Add to \"{name}\"", - "testDefinition.executions.addedTo": "Added to \"{name}\"", - "testDefinition.executions.removeFrom": "Remove from \"{name}\"", - "testDefinition.executions.removedFrom": "Execution removed from \"{name}\"", - "testDefinition.executions.toast.addedTo": "Go back to \"{name}\"", - "testDefinition.executions.tooltip.addTo": "Add to new test", - "testDefinition.executions.tooltip.noExecutions": "Evaluation executions can not be added to tests", - "testDefinition.executions.tooltip.onlySuccess": "Only successful executions can be added to tests", - "testDefinition.workflow.createNew": "Create new evaluation workflow", - 
"testDefinition.workflow.createNew.or": "or use existing evaluation sub-workflow", - "testDefinition.executions.toast.addedTo.title": "Execution added to test ", - "testDefinition.executions.toast.closeTab": "Close this tab", - "testDefinition.executions.toast.removedFrom.title": "Execution removed from test ", + "evaluation.listRuns.status.new": "New", + "evaluation.listRuns.status.running": "Running", + "evaluation.listRuns.status.evaluating": "Evaluating", + "evaluation.listRuns.status.completed": "Completed", + "evaluation.listRuns.status.cancelled": "Cancelled", + "evaluation.listRuns.status.error": "Error", + "evaluation.listRuns.status.success": "Success", + "evaluation.listRuns.status.warning": "Warning", + "evaluation.listRuns.metricsOverTime": "Metrics over time", + "evaluation.listRuns.status": "Status", + "evaluation.listRuns.runListHeader": "All runs", + "evaluation.listRuns.testCasesListHeader": "Run #{index}", + "evaluation.listRuns.runNumber": "Run", + "evaluation.listRuns.runDate": "Run date", + "evaluation.listRuns.runStatus": "Run status", + "evaluation.listRuns.noRuns": "No test runs", + "evaluation.listRuns.pastRuns.total": "No runs | All runs | All runs", + "evaluation.listRuns.noRuns.description": "Run a test to see the results here", + "evaluation.listRuns.deleteRuns": "No runs to delete | Delete {count} run | Delete {count} runs", + "evaluation.listRuns.noRuns.button": "Run Test", + "evaluation.listRuns.toast.error.fetchTestCases": "Failed to load run details", + "evaluation.listRuns.error.testCasesNotFound": "No matching rows in dataset{description}", + "evaluation.listRuns.error.testCasesNotFound.description": "Check any filters or limits set in the evaluation trigger", + "evaluation.listRuns.error.executionInterrupted": "Test run was interrupted", + "evaluation.listRuns.error.unknownError": "Execution error{description}", + "evaluation.listRuns.error.cantFetchTestRuns": "Couldn’t fetch test runs", + 
"evaluation.listRuns.error.cantStartTestRun": "Couldn’t start test run", + "evaluation.listRuns.error.unknownError.description": "Click for more details", + "evaluation.listRuns.error.evaluationTriggerNotFound": "Evaluation trigger missing", + "evaluation.listRuns.error.evaluationTriggerNotConfigured": "Evaluation trigger is not configured", + "evaluation.listRuns.error.evaluationTriggerDisabled": "Evaluation trigger is disabled", + "evaluation.listRuns.error.setOutputsNodeNotFound": "No 'Set outputs' node in workflow", + "evaluation.listRuns.error.setOutputsNodeNotConfigured": "'Set outputs' node is not configured", + "evaluation.listRuns.error.setMetricsNodeNotFound": "No 'Set metrics' node in workflow", + "evaluation.listRuns.error.setMetricsNodeNotConfigured": "'Set metrics' node is not configured", + "evaluation.listRuns.error.cantFetchTestCases": "Couldn’t fetch test cases{description}", + "evaluation.listRuns.error.cantFetchTestCases.description": "Check the Google Sheet setup in the evaluation trigger", + "evaluation.runDetail.ranAt": "Ran at", + "evaluation.runDetail.testCase": "Test case", + "evaluation.runDetail.testCase.id": "Test case ID", + "evaluation.runDetail.testCase.status": "Test case status", + "evaluation.runDetail.totalCases": "Total cases", + "evaluation.runDetail.error.mockedNodeMissing": "Output for a mocked node does not exist in benchmark execution.{link}.", + "evaluation.runDetail.error.mockedNodeMissing.solution": "Fix test configuration", + "evaluation.runDetail.error.executionFailed": "Failed to execute workflow", + "evaluation.runDetail.error.executionFailed.solution": "View execution", + "evaluation.runDetail.error.datasetTriggerNotFound": "Dataset trigger does not exist in the workflow.{link}.", + "evaluation.runDetail.error.datasetTriggerNotFound.solution": "View workflow", + "evaluation.runDetail.error.invalidMetrics": "Evaluation metrics node returned invalid metrics. Only numeric values are expected. View workflow. 
{link}.", + "evaluation.runDetail.error.invalidMetrics.solution": "View workflow", + "evaluation.runDetail.error.unknownError": "An unknown error occurred", + "evaluation.runDetail.error.unknownError.solution": "View execution", + "evaluation.runDetail.error.noMetricsCollected": "No 'Set metrics' node executed", + "evaluation.runDetail.error.partialCasesFailed": "Finished with errors", + "evaluation.runTest": "Run Test", + "evaluation.cancelTestRun": "Cancel Test Run", + "evaluation.notImplemented": "This feature is not implemented yet!", + "evaluation.viewDetails": "View Details", + "evaluation.editTest": "Edit Test", + "evaluation.deleteTest": "Delete Test", + "evaluation.deleteTest.warning": "The test and all associated runs will be removed. This cannot be undone", + "evaluation.testIsRunning": "Test is running. Please wait for it to finish.", + "evaluation.completeConfig": "Complete the configuration below to run the test:", + "evaluation.configError.noEvaluationTag": "No evaluation tag set", + "evaluation.configError.noExecutionsAddedToTag": "No executions added to this tag", + "evaluation.configError.noEvaluationWorkflow": "No evaluation workflow set", + "evaluation.configError.noMetrics": "No metrics set", + "evaluation.workflowInput.subworkflowName": "Evaluation workflow for {name}", + "evaluation.workflowInput.subworkflowName.default": "My Evaluation Sub-Workflow", + "evaluation.executions.addTo": "Add to Test", + "evaluation.executions.addTo.new": "Add to Test", + "evaluation.executions.addTo.existing": "Add to \"{name}\"", + "evaluation.executions.addedTo": "Added to \"{name}\"", + "evaluation.executions.removeFrom": "Remove from \"{name}\"", + "evaluation.executions.removedFrom": "Execution removed from \"{name}\"", + "evaluation.executions.toast.addedTo": "Go back to \"{name}\"", + "evaluation.executions.tooltip.addTo": "Add to new test", + "evaluation.executions.tooltip.noExecutions": "Evaluation executions can not be added to tests", + 
"evaluation.executions.tooltip.onlySuccess": "Only successful executions can be added to tests", + "evaluation.workflow.createNew": "Create new evaluation workflow", + "evaluation.workflow.createNew.or": "or use existing evaluation sub-workflow", + "evaluation.executions.toast.addedTo.title": "Execution added to test ", + "evaluation.executions.toast.closeTab": "Close this tab", + "evaluation.executions.toast.removedFrom.title": "Execution removed from test ", + "evaluations.paywall.title": "Register to enable evaluation", + "evaluations.paywall.description": "Register your Community instance to unlock the evaluation feature", + "evaluations.paywall.cta": "Register instance", + "evaluations.setupWizard.title": "Test your AI workflow over multiple inputs", + "evaluations.setupWizard.description": "Evaluations measure performance against a test dataset.", + "evaluations.setupWizard.moreInfo": "More info", + "evaluations.setupWizard.stepHeader.optional": "Optional", + "evaluations.setupWizard.step1.title": "Wire up a test dataset", + "evaluations.setupWizard.step1.item1": "Set up a Google Sheet with one input per row", + "evaluations.setupWizard.step1.item2": "Add an evaluation trigger to your workflow and wire it up", + "evaluations.setupWizard.step1.button": "Add evaluation trigger", + "evaluations.setupWizard.step2.title": "Write workflow outputs back to dataset", + "evaluations.setupWizard.step2.item1": "Add a 'set outputs' operation to log each output back to Google Sheets", + "evaluations.setupWizard.step2.button": "Add 'set outputs' node", + "evaluations.setupWizard.step3.title": "Set up a quality score", + "evaluations.setupWizard.step3.item1": "Calculate a score, e.g. 
by comparing expected and actual outputs", + "evaluations.setupWizard.step3.item2": "Add a 'set metrics' operation to log the score", + "evaluations.setupWizard.step3.button": "Add 'Set metrics' node", + "evaluations.setupWizard.step3.skip": "Skip", + "evaluations.setupWizard.step3.notice": "Your plan supports custom metrics for one workflow only. {link}", + "evaluations.setupWizard.step3.notice.link": "See plans", + "evaluations.setupWizard.step4.title": "Run evaluation", + "evaluations.setupWizard.step4.button": "Run evaluation", + "evaluations.setupWizard.step4.altButton": "Run in editor", + "evaluations.setupWizard.limitReached": "Limit reached. Your plan includes custom metrics for one workflow only. Upgrade for unlimited use or delete the workflow with existing evaluation runs.", "freeAi.credits.callout.claim.title": "Get {credits} free OpenAI API credits", "freeAi.credits.callout.claim.button.label": "Claim credits", "freeAi.credits.callout.success.title.part1": "Claimed {credits} free OpenAI API credits! 
Please note these free credits are only for the following models:", diff --git a/packages/frontend/editor-ui/src/router.ts b/packages/frontend/editor-ui/src/router.ts index 4c525d1389..b601be8ff7 100644 --- a/packages/frontend/editor-ui/src/router.ts +++ b/packages/frontend/editor-ui/src/router.ts @@ -11,7 +11,7 @@ import { useSettingsStore } from '@/stores/settings.store'; import { useTemplatesStore } from '@/stores/templates.store'; import { useUIStore } from '@/stores/ui.store'; import { useSSOStore } from '@/stores/sso.store'; -import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee'; +import { useEvaluationStore } from '@/stores/evaluation.store.ee'; import { EnterpriseEditionFeature, VIEWS, EDITABLE_CANVAS_VIEWS } from '@/constants'; import { useTelemetry } from '@/composables/useTelemetry'; import { middleware } from '@/utils/rbac/middleware'; @@ -20,7 +20,7 @@ import { initializeAuthenticatedFeatures, initializeCore } from '@/init'; import { tryToParseNumber } from '@/utils/typesUtils'; import { projectsRoutes } from '@/routes/projects.routes'; import { insightsRoutes } from '@/features/insights/insights.router'; -import TestDefinitionRunDetailView from './views/TestDefinition/TestDefinitionRunDetailView.vue'; +import TestRunDetailView from '@/views/Evaluations.ee/TestRunDetailView.vue'; const ChangePasswordView = async () => await import('./views/ChangePasswordView.vue'); const ErrorView = async () => await import('./views/ErrorView.vue'); @@ -62,14 +62,9 @@ const SettingsExternalSecrets = async () => await import('./views/SettingsExtern const WorkerView = async () => await import('./views/WorkerView.vue'); const WorkflowHistory = async () => await import('@/views/WorkflowHistory.vue'); const WorkflowOnboardingView = async () => await import('@/views/WorkflowOnboardingView.vue'); -const TestDefinitionListView = async () => - await import('./views/TestDefinition/TestDefinitionListView.vue'); -const TestDefinitionNewView = async () => - 
await import('./views/TestDefinition/TestDefinitionNewView.vue'); -const TestDefinitionEditView = async () => - await import('./views/TestDefinition/TestDefinitionEditView.vue'); -const TestDefinitionRootView = async () => - await import('./views/TestDefinition/TestDefinitionRootView.vue'); +const EvaluationsView = async () => await import('@/views/Evaluations.ee/EvaluationsView.vue'); +const EvaluationRootView = async () => + await import('@/views/Evaluations.ee/EvaluationsRootView.vue'); function getTemplatesRedirect(defaultRedirect: VIEWS[keyof VIEWS]): { name: string } | false { const settingsStore = useSettingsStore(); @@ -264,48 +259,35 @@ export const routes: RouteRecordRaw[] = [ }, { path: '/workflow/:name/evaluation', + name: VIEWS.EVALUATION, components: { - default: TestDefinitionRootView, + default: EvaluationRootView, header: MainHeader, sidebar: MainSidebar, }, - props: true, + props: { + default: true, + }, meta: { keepWorkflowAlive: true, middleware: ['authenticated', 'custom'], middlewareOptions: { - custom: () => useTestDefinitionStore().isFeatureEnabled, + custom: () => useEvaluationStore().isFeatureEnabled, }, }, children: [ { path: '', - name: VIEWS.TEST_DEFINITION, - component: TestDefinitionListView, + name: VIEWS.EVALUATION_EDIT, + component: EvaluationsView, props: true, }, { - path: 'new', - name: VIEWS.NEW_TEST_DEFINITION, - component: TestDefinitionNewView, + path: 'test-runs/:runId', + name: VIEWS.EVALUATION_RUNS_DETAIL, + component: TestRunDetailView, props: true, }, - { - path: ':testId', - name: VIEWS.TEST_DEFINITION_EDIT, - props: true, - components: { - default: TestDefinitionEditView, - }, - }, - { - path: ':testId/runs/:runId', - name: VIEWS.TEST_DEFINITION_RUNS_DETAIL, - props: true, - components: { - default: TestDefinitionRunDetailView, - }, - }, ], }, { diff --git a/packages/frontend/editor-ui/src/stores/evaluation.store.ee.test.ts b/packages/frontend/editor-ui/src/stores/evaluation.store.ee.test.ts new file mode 100644 index 
0000000000..8cfa44e52d --- /dev/null +++ b/packages/frontend/editor-ui/src/stores/evaluation.store.ee.test.ts @@ -0,0 +1,208 @@ +import { createPinia, setActivePinia } from 'pinia'; +import { useEvaluationStore } from '@/stores/evaluation.store.ee'; // Adjust the import path as necessary +import { useRootStore } from '@n8n/stores/useRootStore'; +import { usePostHog } from '@/stores/posthog.store'; +import { useAnnotationTagsStore } from '@/stores/tags.store'; +import type { TestRunRecord } from '@/api/evaluation.ee'; +import { mockedStore } from '@/__tests__/utils'; + +const { getTestRuns, getTestRun, startTestRun, deleteTestRun } = vi.hoisted(() => ({ + getTestRuns: vi.fn(), + getTestRun: vi.fn(), + startTestRun: vi.fn(), + deleteTestRun: vi.fn(), +})); + +vi.mock('@/api/evaluation.ee', () => ({ + getTestRuns, + getTestRun, + startTestRun, + deleteTestRun, +})); + +vi.mock('@n8n/stores/useRootStore', () => ({ + useRootStore: vi.fn(() => ({ + restApiContext: { instanceId: 'test-instance-id' }, + })), +})); + +const TEST_RUN: TestRunRecord = { + id: 'run1', + workflowId: '1', + status: 'completed', + metrics: { metric1: 0.75 }, + createdAt: '2024-01-01', + updatedAt: '2024-01-01', + runAt: '2024-01-01', + completedAt: '2024-01-01', +}; + +describe('evaluation.store.ee', () => { + let store: ReturnType<typeof useEvaluationStore>; + let rootStoreMock: ReturnType<typeof useRootStore>; + let posthogStoreMock: ReturnType<typeof usePostHog>; + + beforeEach(() => { + vi.restoreAllMocks(); + setActivePinia(createPinia()); + store = useEvaluationStore(); + rootStoreMock = useRootStore(); + posthogStoreMock = usePostHog(); + + mockedStore(useAnnotationTagsStore).fetchAll = vi.fn().mockResolvedValue([]); + + getTestRuns.mockResolvedValue([TEST_RUN]); + getTestRun.mockResolvedValue(TEST_RUN); + startTestRun.mockResolvedValue({ success: true }); + deleteTestRun.mockResolvedValue({ success: true }); + }); + + test('Initialization', () => { + expect(store.testRunsById).toEqual({}); + expect(store.isLoading).toBe(false); + }); + 
describe('Computed Properties', () => { + test('isFeatureEnabled', () => { + posthogStoreMock.isFeatureEnabled = vi.fn().mockReturnValue(false); + + expect(store.isFeatureEnabled).toBe(false); + posthogStoreMock.isFeatureEnabled = vi.fn().mockReturnValue(true); + + expect(store.isFeatureEnabled).toBe(true); + }); + }); + + describe('Test Runs', () => { + test('Fetching Test Runs', async () => { + const result = await store.fetchTestRuns('1'); + + expect(getTestRuns).toHaveBeenCalledWith(rootStoreMock.restApiContext, '1'); + expect(store.testRunsById).toEqual({ + run1: TEST_RUN, + }); + expect(result).toEqual([TEST_RUN]); + }); + + test('Getting specific Test Run', async () => { + const params = { workflowId: '1', runId: 'run1' }; + const result = await store.getTestRun(params); + + expect(getTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, params); + expect(store.testRunsById).toEqual({ + run1: TEST_RUN, + }); + expect(result).toEqual(TEST_RUN); + }); + + test('Starting Test Run', async () => { + const result = await store.startTestRun('1'); + + expect(startTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, '1'); + expect(result).toEqual({ success: true }); + }); + + test('Deleting Test Run', async () => { + store.testRunsById = { run1: TEST_RUN }; + const params = { workflowId: '1', runId: 'run1' }; + + const result = await store.deleteTestRun(params); + + expect(deleteTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, params); + expect(store.testRunsById).toEqual({}); + expect(result).toEqual({ success: true }); + }); + + test('Getting Test Runs by Test ID', () => { + store.testRunsById = { + run1: TEST_RUN, + run2: { ...TEST_RUN, id: 'run2', workflowId: '2' }, + }; + + const runs = store.testRunsByWorkflowId['1']; + + expect(runs).toEqual([TEST_RUN]); + }); + }); + + describe('Polling Mechanism', () => { + beforeEach(() => { + vi.useFakeTimers(); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + + test('should start polling 
for running test runs', async () => { + const runningTestRun = { + ...TEST_RUN, + status: 'running', + }; + + getTestRuns.mockResolvedValueOnce([runningTestRun]); + + // First call returns running status + getTestRun.mockResolvedValueOnce({ + ...runningTestRun, + status: 'running', + }); + + // Second call returns completed status + getTestRun.mockResolvedValueOnce({ + ...runningTestRun, + status: 'completed', + }); + + await store.fetchTestRuns('1'); + + expect(store.testRunsById).toEqual({ + run1: runningTestRun, + }); + + // Advance timer to trigger the first poll + await vi.advanceTimersByTimeAsync(1000); + + // Verify first poll happened + expect(getTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, { + workflowId: '1', + runId: 'run1', + }); + + // Advance timer again + await vi.advanceTimersByTimeAsync(1000); + + // Verify polling stopped after status changed to completed + expect(getTestRun).toHaveBeenCalledTimes(2); + }); + + test('should cleanup polling timeouts', async () => { + const runningTestRun = { + ...TEST_RUN, + status: 'running', + }; + + getTestRuns.mockResolvedValueOnce([runningTestRun]); + getTestRun.mockResolvedValue({ + ...runningTestRun, + status: 'running', + }); + + await store.fetchTestRuns('1'); + + // Wait for the first poll to complete + await vi.runOnlyPendingTimersAsync(); + + // Clear mock calls from initial setup + getTestRun.mockClear(); + + store.cleanupPolling(); + + // Advance timer + await vi.advanceTimersByTimeAsync(1000); + + // Verify no more polling happened after cleanup + expect(getTestRun).not.toHaveBeenCalled(); + }); + }); +}); diff --git a/packages/frontend/editor-ui/src/stores/evaluation.store.ee.ts b/packages/frontend/editor-ui/src/stores/evaluation.store.ee.ts new file mode 100644 index 0000000000..2b4ea06e88 --- /dev/null +++ b/packages/frontend/editor-ui/src/stores/evaluation.store.ee.ts @@ -0,0 +1,209 @@ +import { defineStore } from 'pinia'; +import { computed, ref } from 'vue'; +import { 
useRootStore } from '@n8n/stores/useRootStore'; +import * as evaluationsApi from '@/api/evaluation.ee'; +import type { TestCaseExecutionRecord, TestRunRecord } from '@/api/evaluation.ee'; +import { usePostHog } from './posthog.store'; +import { WORKFLOW_EVALUATION_EXPERIMENT } from '@/constants'; +import { STORES } from '@n8n/stores'; +import { useWorkflowsStore } from '@/stores/workflows.store'; +import { EVALUATION_NODE_TYPE, EVALUATION_TRIGGER_NODE_TYPE, NodeHelpers } from 'n8n-workflow'; +import { useNodeTypesStore } from '@/stores/nodeTypes.store'; +import { useSettingsStore } from '@/stores/settings.store'; + +export const useEvaluationStore = defineStore( + STORES.EVALUATION, + () => { + // State + const loadingTestRuns = ref(false); + const fetchedAll = ref(false); + const testRunsById = ref<Record<string, TestRunRecord>>({}); + const testCaseExecutionsById = ref<Record<string, TestCaseExecutionRecord>>({}); + const pollingTimeouts = ref<Record<string, NodeJS.Timeout>>({}); + + // Store instances + const posthogStore = usePostHog(); + const rootStore = useRootStore(); + const workflowsStore = useWorkflowsStore(); + const nodeTypesStore = useNodeTypesStore(); + const settingsStore = useSettingsStore(); + + // Computed + + // Enable with `window.featureFlags.override('025_workflow_evaluation', true)` + const isFeatureEnabled = computed(() => + posthogStore.isFeatureEnabled(WORKFLOW_EVALUATION_EXPERIMENT), + ); + + const isEvaluationEnabled = computed( + () => + posthogStore.isFeatureEnabled(WORKFLOW_EVALUATION_EXPERIMENT) && + settingsStore.settings.evaluation.quota !== 0, + ); + + const isLoading = computed(() => loadingTestRuns.value); + + const testRunsByWorkflowId = computed(() => { + return Object.values(testRunsById.value).reduce( + (acc: Record<string, TestRunRecord[]>, run) => { + if (!acc[run.workflowId]) { + acc[run.workflowId] = []; + } + acc[run.workflowId].push(run); + return acc; + }, + {}, + ); + }); + + const evaluationTriggerExists = computed(() => { + return workflowsStore.workflow.nodes.some( + (node) => node.type === EVALUATION_TRIGGER_NODE_TYPE, + ); + }); + 
function evaluationNodeExist(operation: string) { + return workflowsStore.workflow.nodes.some((node) => { + if (node.type !== EVALUATION_NODE_TYPE) { + return false; + } + + const nodeType = nodeTypesStore.getNodeType(node.type, node.typeVersion); + if (!nodeType) return false; + + const nodeParameters = NodeHelpers.getNodeParameters( + nodeType.properties, + node.parameters, + true, + false, + node, + nodeType, + ); + + return nodeParameters?.operation === operation; + }); + } + + const evaluationSetMetricsNodeExist = computed(() => { + return evaluationNodeExist('setMetrics'); + }); + + const evaluationSetOutputsNodeExist = computed(() => { + return evaluationNodeExist('setOutputs'); + }); + + // Methods + + const fetchTestCaseExecutions = async (params: { workflowId: string; runId: string }) => { + const testCaseExecutions = await evaluationsApi.getTestCaseExecutions( + rootStore.restApiContext, + params.workflowId, + params.runId, + ); + + testCaseExecutions.forEach((testCaseExecution) => { + testCaseExecutionsById.value[testCaseExecution.id] = testCaseExecution; + }); + + return testCaseExecutions; + }; + + // Test Runs Methods + const fetchTestRuns = async (workflowId: string) => { + loadingTestRuns.value = true; + try { + const runs = await evaluationsApi.getTestRuns(rootStore.restApiContext, workflowId); + runs.forEach((run) => { + testRunsById.value[run.id] = run; + if (['running', 'new'].includes(run.status)) { + startPollingTestRun(workflowId, run.id); + } + }); + return runs; + } finally { + loadingTestRuns.value = false; + } + }; + + const getTestRun = async (params: { workflowId: string; runId: string }) => { + const run = await evaluationsApi.getTestRun(rootStore.restApiContext, params); + testRunsById.value[run.id] = run; + return run; + }; + + const startTestRun = async (workflowId: string) => { + const result = await evaluationsApi.startTestRun(rootStore.restApiContext, workflowId); + return result; + }; + + const cancelTestRun = async 
(workflowId: string, testRunId: string) => { + const result = await evaluationsApi.cancelTestRun( + rootStore.restApiContext, + workflowId, + testRunId, + ); + return result; + }; + + const deleteTestRun = async (params: { workflowId: string; runId: string }) => { + const result = await evaluationsApi.deleteTestRun(rootStore.restApiContext, params); + if (result.success) { + const { [params.runId]: deleted, ...rest } = testRunsById.value; + testRunsById.value = rest; + } + return result; + }; + + // TODO: This is a temporary solution to poll for test run status. + // We should use a more efficient polling mechanism in the future. + const startPollingTestRun = (workflowId: string, runId: string) => { + const poll = async () => { + try { + const run = await getTestRun({ workflowId, runId }); + if (['running', 'new'].includes(run.status)) { + pollingTimeouts.value[runId] = setTimeout(poll, 1000); + } else { + delete pollingTimeouts.value[runId]; + } + } catch (error) { + // If the API call fails, continue polling + pollingTimeouts.value[runId] = setTimeout(poll, 1000); + } + }; + void poll(); + }; + + const cleanupPolling = () => { + Object.values(pollingTimeouts.value).forEach((timeout) => { + clearTimeout(timeout); + }); + pollingTimeouts.value = {}; + }; + + return { + // State + fetchedAll, + testRunsById, + testCaseExecutionsById, + + // Computed + isLoading, + isFeatureEnabled, + isEvaluationEnabled, + testRunsByWorkflowId, + evaluationTriggerExists, + evaluationSetMetricsNodeExist, + evaluationSetOutputsNodeExist, + + // Methods + fetchTestCaseExecutions, + fetchTestRuns, + getTestRun, + startTestRun, + cancelTestRun, + deleteTestRun, + cleanupPolling, + }; + }, + {}, +); diff --git a/packages/frontend/editor-ui/src/stores/nodeCreator.store.ts b/packages/frontend/editor-ui/src/stores/nodeCreator.store.ts index bb271e71d4..c7f3595c94 100644 --- a/packages/frontend/editor-ui/src/stores/nodeCreator.store.ts +++ 
b/packages/frontend/editor-ui/src/stores/nodeCreator.store.ts @@ -220,6 +220,45 @@ export const useNodeCreatorStore = defineStore(STORES.NODE_CREATOR, () => { }); } + function openNodeCreatorForActions(node: string, eventSource?: NodeCreatorOpenSource) { + const actionNode = allNodeCreatorNodes.value.find((i) => i.key === node); + + if (!actionNode) { + return; + } + + const nodeActions = actions.value[actionNode.key]; + + const transformedActions = nodeActions?.map((a) => + transformNodeType(a, actionNode.properties.displayName, 'action'), + ); + + ndvStore.activeNodeName = null; + setSelectedView(REGULAR_NODE_CREATOR_VIEW); + setNodeCreatorState({ + source: eventSource, + createNodeActive: true, + nodeCreatorView: REGULAR_NODE_CREATOR_VIEW, + }); + + setTimeout(() => { + useViewStacks().pushViewStack( + { + subcategory: '*', + title: actionNode.properties.displayName, + nodeIcon: { + type: 'icon', + name: 'check-double', + }, + rootView: 'Regular', + mode: 'actions', + items: transformedActions, + }, + { resetStacks: true }, + ); + }); + } + function getNodeCreatorFilter(nodeName: string, outputType?: NodeConnectionType) { let filter; const workflow = workflowsStore.getCurrentWorkflow(); @@ -411,6 +450,7 @@ export const useNodeCreatorStore = defineStore(STORES.NODE_CREATOR, () => { openSelectiveNodeCreator, openNodeCreatorForConnectingNode, openNodeCreatorForTriggerNodes, + openNodeCreatorForActions, onCreatorOpened, onNodeFilterChanged, onCategoryExpanded, diff --git a/packages/frontend/editor-ui/src/stores/testDefinition.store.ee.test.ts b/packages/frontend/editor-ui/src/stores/testDefinition.store.ee.test.ts deleted file mode 100644 index 9c712aae7e..0000000000 --- a/packages/frontend/editor-ui/src/stores/testDefinition.store.ee.test.ts +++ /dev/null @@ -1,488 +0,0 @@ -import { createPinia, setActivePinia } from 'pinia'; -import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee'; // Adjust the import path as necessary -import { useRootStore } 
from '@n8n/stores/useRootStore'; -import { usePostHog } from '@/stores/posthog.store'; -import { useAnnotationTagsStore } from '@/stores/tags.store'; -import type { TestDefinitionRecord, TestRunRecord } from '@/api/testDefinition.ee'; -import { mockedStore } from '@/__tests__/utils'; - -const { - createTestDefinition, - deleteTestDefinition, - getTestDefinitions, - updateTestDefinition, - getTestRuns, - getTestRun, - startTestRun, - deleteTestRun, -} = vi.hoisted(() => ({ - getTestDefinitions: vi.fn(), - createTestDefinition: vi.fn(), - updateTestDefinition: vi.fn(), - deleteTestDefinition: vi.fn(), - getTestRuns: vi.fn(), - getTestRun: vi.fn(), - startTestRun: vi.fn(), - deleteTestRun: vi.fn(), -})); - -vi.mock('@/api/testDefinition.ee', () => ({ - createTestDefinition, - deleteTestDefinition, - getTestDefinitions, - updateTestDefinition, - getTestRuns, - getTestRun, - startTestRun, - deleteTestRun, -})); - -vi.mock('@n8n/stores/useRootStore', () => ({ - useRootStore: vi.fn(() => ({ - restApiContext: { instanceId: 'test-instance-id' }, - })), -})); - -const TEST_DEF_A: TestDefinitionRecord = { - id: '1', - name: 'Test Definition A', - workflowId: '123', - description: 'Description A', - createdAt: '2023-01-01T00:00:00.000Z', -}; -const TEST_DEF_B: TestDefinitionRecord = { - id: '2', - name: 'Test Definition B', - workflowId: '123', - description: 'Description B', - createdAt: '2023-01-01T00:00:00.000Z', -}; -const TEST_DEF_NEW: TestDefinitionRecord = { - id: '3', - name: 'New Test Definition', - workflowId: '123', - description: 'New Description', - createdAt: '2023-01-01T00:00:00.000Z', -}; - -const TEST_RUN: TestRunRecord = { - id: 'run1', - testDefinitionId: '1', - status: 'completed', - metrics: { metric1: 0.75 }, - createdAt: '2024-01-01', - updatedAt: '2024-01-01', - runAt: '2024-01-01', - completedAt: '2024-01-01', -}; - -describe('testDefinition.store.ee', () => { - let store: ReturnType; - let rootStoreMock: ReturnType; - let posthogStoreMock: ReturnType; 
- - beforeEach(() => { - vi.restoreAllMocks(); - setActivePinia(createPinia()); - store = useTestDefinitionStore(); - rootStoreMock = useRootStore(); - posthogStoreMock = usePostHog(); - - mockedStore(useAnnotationTagsStore).fetchAll = vi.fn().mockResolvedValue([]); - getTestDefinitions.mockResolvedValue({ - count: 2, - testDefinitions: [TEST_DEF_A, TEST_DEF_B], - }); - - createTestDefinition.mockResolvedValue(TEST_DEF_NEW); - - deleteTestDefinition.mockResolvedValue({ success: true }); - - getTestRuns.mockResolvedValue([TEST_RUN]); - getTestRun.mockResolvedValue(TEST_RUN); - startTestRun.mockResolvedValue({ success: true }); - deleteTestRun.mockResolvedValue({ success: true }); - }); - - test('Initialization', () => { - expect(store.testDefinitionsById).toEqual({}); - expect(store.isLoading).toBe(false); - expect(store.hasTestDefinitions).toBe(false); - }); - - describe('Test Definitions', () => { - test('Fetching Test Definitions', async () => { - expect(store.isLoading).toBe(false); - - const result = await store.fetchAll({ workflowId: '123' }); - - expect(getTestDefinitions).toHaveBeenCalledWith(rootStoreMock.restApiContext, { - workflowId: '123', - }); - expect(store.testDefinitionsById).toEqual({ - '1': TEST_DEF_A, - '2': TEST_DEF_B, - }); - expect(store.isLoading).toBe(false); - expect(result).toEqual([TEST_DEF_A, TEST_DEF_B]); - }); - - test('Fetching Test Definitions with force flag', async () => { - expect(store.isLoading).toBe(false); - - const result = await store.fetchAll({ force: true, workflowId: '123' }); - - expect(getTestDefinitions).toHaveBeenCalledWith(rootStoreMock.restApiContext, { - workflowId: '123', - }); - expect(store.testDefinitionsById).toEqual({ - '1': TEST_DEF_A, - '2': TEST_DEF_B, - }); - expect(store.isLoading).toBe(false); - expect(result).toEqual([TEST_DEF_A, TEST_DEF_B]); - }); - - test('Fetching Test Definitions when already fetched', async () => { - store.fetchedAll = true; - - const result = await store.fetchAll(); - - 
expect(getTestDefinitions).not.toHaveBeenCalled(); - expect(store.testDefinitionsById).toEqual({}); - expect(result).toEqual({ - count: 0, - testDefinitions: [], - }); - }); - - test('Upserting Test Definitions - New Definition', () => { - const newDefinition = TEST_DEF_NEW; - - store.upsertTestDefinitions([newDefinition]); - - expect(store.testDefinitionsById).toEqual({ - '3': TEST_DEF_NEW, - }); - }); - - test('Upserting Test Definitions - Existing Definition', () => { - store.testDefinitionsById = { - '1': TEST_DEF_A, - }; - - const updatedDefinition = { - id: '1', - name: 'Updated Test Definition A', - description: 'Updated Description A', - workflowId: '123', - createdAt: '2023-01-01T00:00:00.000Z', - }; - - store.upsertTestDefinitions([updatedDefinition]); - - expect(store.testDefinitionsById).toEqual({ - 1: updatedDefinition, - }); - }); - - test('Creating a Test Definition', async () => { - const params = { - name: 'New Test Definition', - workflowId: 'test-workflow-id', - evaluationWorkflowId: 'test-evaluation-workflow-id', - description: 'New Description', - }; - - const result = await store.create(params); - - expect(createTestDefinition).toHaveBeenCalledWith(rootStoreMock.restApiContext, params); - expect(store.testDefinitionsById).toEqual({ - '3': TEST_DEF_NEW, - }); - expect(result).toEqual(TEST_DEF_NEW); - }); - - test('Updating a Test Definition', async () => { - store.testDefinitionsById = { - '1': TEST_DEF_A, - '2': TEST_DEF_B, - }; - - const params = { - id: '1', - name: 'Updated Test Definition A', - description: 'Updated Description A', - workflowId: '123', - }; - updateTestDefinition.mockResolvedValue(params); - - const result = await store.update(params); - - expect(updateTestDefinition).toHaveBeenCalledWith(rootStoreMock.restApiContext, '1', { - name: 'Updated Test Definition A', - description: 'Updated Description A', - workflowId: '123', - }); - expect(store.testDefinitionsById).toEqual({ - '1': { ...TEST_DEF_A, ...params }, - '2': 
TEST_DEF_B, - }); - expect(result).toEqual(params); - }); - - test('Deleting a Test Definition', () => { - store.testDefinitionsById = { - '1': TEST_DEF_A, - '2': TEST_DEF_B, - }; - - store.deleteTestDefinition('1'); - - expect(store.testDefinitionsById).toEqual({ - '2': TEST_DEF_B, - }); - }); - - test('Deleting a Test Definition by ID', async () => { - store.testDefinitionsById = { - '1': TEST_DEF_A, - }; - - const result = await store.deleteById('1'); - - expect(deleteTestDefinition).toHaveBeenCalledWith(rootStoreMock.restApiContext, '1'); - expect(store.testDefinitionsById).toEqual({}); - expect(result).toBe(true); - }); - }); - - describe('Computed Properties', () => { - test('hasTestDefinitions', () => { - store.testDefinitionsById = {}; - - expect(store.hasTestDefinitions).toBe(false); - store.testDefinitionsById = { - '1': TEST_DEF_A, - }; - - expect(store.hasTestDefinitions).toBe(true); - }); - - test('isFeatureEnabled', () => { - posthogStoreMock.isFeatureEnabled = vi.fn().mockReturnValue(false); - - expect(store.isFeatureEnabled).toBe(false); - posthogStoreMock.isFeatureEnabled = vi.fn().mockReturnValue(true); - - expect(store.isFeatureEnabled).toBe(true); - }); - - test('allTestDefinitionsByWorkflowId', () => { - store.testDefinitionsById = { - '1': { ...TEST_DEF_A, workflowId: 'workflow1' }, - '2': { ...TEST_DEF_B, workflowId: 'workflow1' }, - '3': { ...TEST_DEF_NEW, workflowId: 'workflow2' }, - }; - - expect(store.allTestDefinitionsByWorkflowId).toEqual({ - workflow1: [ - { ...TEST_DEF_A, workflowId: 'workflow1' }, - { ...TEST_DEF_B, workflowId: 'workflow1' }, - ], - workflow2: [{ ...TEST_DEF_NEW, workflowId: 'workflow2' }], - }); - }); - - test('lastRunByTestId', () => { - const olderRun = { - ...TEST_RUN, - id: 'run2', - testDefinitionId: '1', - updatedAt: '2023-12-31', - }; - - const newerRun = { - ...TEST_RUN, - id: 'run3', - testDefinitionId: '2', - updatedAt: '2024-01-02', - }; - - store.testRunsById = { - run1: { ...TEST_RUN, testDefinitionId: 
'1' }, - run2: olderRun, - run3: newerRun, - }; - - expect(store.lastRunByTestId).toEqual({ - '1': TEST_RUN, - '2': newerRun, - }); - }); - - test('lastRunByTestId with no runs', () => { - store.testRunsById = {}; - expect(store.lastRunByTestId).toEqual({}); - }); - }); - - describe('Error Handling', () => { - test('create', async () => { - createTestDefinition.mockRejectedValue(new Error('Create failed')); - - await expect( - store.create({ name: 'New Test Definition', workflowId: 'test-workflow-id' }), - ).rejects.toThrow('Create failed'); - }); - - test('update', async () => { - updateTestDefinition.mockRejectedValue(new Error('Update failed')); - - await expect(store.update({ id: '1', name: 'Updated Test Definition A' })).rejects.toThrow( - 'Update failed', - ); - }); - - test('deleteById', async () => { - deleteTestDefinition.mockResolvedValue({ success: false }); - - const result = await store.deleteById('1'); - - expect(result).toBe(false); - }); - }); - - describe('Test Runs', () => { - test('Fetching Test Runs', async () => { - const result = await store.fetchTestRuns('1'); - - expect(getTestRuns).toHaveBeenCalledWith(rootStoreMock.restApiContext, '1'); - expect(store.testRunsById).toEqual({ - run1: TEST_RUN, - }); - expect(result).toEqual([TEST_RUN]); - }); - - test('Getting specific Test Run', async () => { - const params = { testDefinitionId: '1', runId: 'run1' }; - const result = await store.getTestRun(params); - - expect(getTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, params); - expect(store.testRunsById).toEqual({ - run1: TEST_RUN, - }); - expect(result).toEqual(TEST_RUN); - }); - - test('Starting Test Run', async () => { - const result = await store.startTestRun('1'); - - expect(startTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, '1'); - expect(result).toEqual({ success: true }); - }); - - test('Deleting Test Run', async () => { - store.testRunsById = { run1: TEST_RUN }; - const params = { testDefinitionId: '1', runId: 
'run1' }; - - const result = await store.deleteTestRun(params); - - expect(deleteTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, params); - expect(store.testRunsById).toEqual({}); - expect(result).toEqual({ success: true }); - }); - - test('Getting Test Runs by Test ID', () => { - store.testRunsById = { - run1: TEST_RUN, - run2: { ...TEST_RUN, id: 'run2', testDefinitionId: '2' }, - }; - - const runs = store.testRunsByTestId['1']; - - expect(runs).toEqual([TEST_RUN]); - }); - }); - - describe('Polling Mechanism', () => { - beforeEach(() => { - vi.useFakeTimers(); - }); - - afterEach(() => { - vi.useRealTimers(); - }); - - test('should start polling for running test runs', async () => { - const runningTestRun = { - ...TEST_RUN, - status: 'running', - }; - - getTestRuns.mockResolvedValueOnce([runningTestRun]); - - // First call returns running status - getTestRun.mockResolvedValueOnce({ - ...runningTestRun, - status: 'running', - }); - - // Second call returns completed status - getTestRun.mockResolvedValueOnce({ - ...runningTestRun, - status: 'completed', - }); - - await store.fetchTestRuns('1'); - - expect(store.testRunsById).toEqual({ - run1: runningTestRun, - }); - - // Advance timer to trigger the first poll - await vi.advanceTimersByTimeAsync(1000); - - // Verify first poll happened - expect(getTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, { - testDefinitionId: '1', - runId: 'run1', - }); - - // Advance timer again - await vi.advanceTimersByTimeAsync(1000); - - // Verify polling stopped after status changed to completed - expect(getTestRun).toHaveBeenCalledTimes(2); - }); - - test('should cleanup polling timeouts', async () => { - const runningTestRun = { - ...TEST_RUN, - status: 'running', - }; - - getTestRuns.mockResolvedValueOnce([runningTestRun]); - getTestRun.mockResolvedValue({ - ...runningTestRun, - status: 'running', - }); - - await store.fetchTestRuns('1'); - - // Wait for the first poll to complete - await 
vi.runOnlyPendingTimersAsync(); - - // Clear mock calls from initial setup - getTestRun.mockClear(); - - store.cleanupPolling(); - - // Advance timer - await vi.advanceTimersByTimeAsync(1000); - - // Verify no more polling happened after cleanup - expect(getTestRun).not.toHaveBeenCalled(); - }); - }); -}); diff --git a/packages/frontend/editor-ui/src/stores/testDefinition.store.ee.ts b/packages/frontend/editor-ui/src/stores/testDefinition.store.ee.ts deleted file mode 100644 index ed43f0e3f1..0000000000 --- a/packages/frontend/editor-ui/src/stores/testDefinition.store.ee.ts +++ /dev/null @@ -1,420 +0,0 @@ -import { defineStore } from 'pinia'; -import { computed, ref } from 'vue'; -import { useRootStore } from '@n8n/stores/useRootStore'; -import * as testDefinitionsApi from '@/api/testDefinition.ee'; -import type { - TestCaseExecutionRecord, - TestDefinitionRecord, - TestRunRecord, -} from '@/api/testDefinition.ee'; -import { usePostHog } from './posthog.store'; -import { WORKFLOW_EVALUATION_EXPERIMENT } from '@/constants'; -import { STORES } from '@n8n/stores'; -import { useAnnotationTagsStore } from './tags.store'; -import { useI18n } from '@/composables/useI18n'; - -type FieldIssue = { field: string; message: string }; - -export const useTestDefinitionStore = defineStore( - STORES.TEST_DEFINITION, - () => { - // State - const testDefinitionsById = ref>({}); - const loading = ref(false); - const fetchedAll = ref(false); - const testRunsById = ref>({}); - const testCaseExecutionsById = ref>({}); - const pollingTimeouts = ref>({}); - const fieldsIssues = ref>({}); - - // Store instances - const posthogStore = usePostHog(); - const rootStore = useRootStore(); - const tagsStore = useAnnotationTagsStore(); - const locale = useI18n(); - // Computed - const allTestDefinitions = computed(() => { - return Object.values(testDefinitionsById.value).sort((a, b) => - (a.name ?? '').localeCompare(b.name ?? 
''), - ); - }); - - const allTestDefinitionsByWorkflowId = computed(() => { - return Object.values(testDefinitionsById.value).reduce( - (acc: Record, test) => { - if (!acc[test.workflowId]) { - acc[test.workflowId] = []; - } - acc[test.workflowId].push(test); - return acc; - }, - {}, - ); - }); - - // Enable with `window.featureFlags.override('025_workflow_evaluation', true)` - const isFeatureEnabled = computed(() => - posthogStore.isFeatureEnabled(WORKFLOW_EVALUATION_EXPERIMENT), - ); - - const isLoading = computed(() => loading.value); - - const hasTestDefinitions = computed(() => Object.keys(testDefinitionsById.value).length > 0); - - const testRunsByTestId = computed(() => { - return Object.values(testRunsById.value).reduce( - (acc: Record, run) => { - if (!acc[run.testDefinitionId]) { - acc[run.testDefinitionId] = []; - } - acc[run.testDefinitionId].push(run); - return acc; - }, - {}, - ); - }); - - const lastRunByTestId = computed(() => { - const grouped = Object.values(testRunsById.value).reduce( - (acc: Record, run) => { - if (!acc[run.testDefinitionId]) { - acc[run.testDefinitionId] = []; - } - acc[run.testDefinitionId].push(run); - return acc; - }, - {}, - ); - - return Object.entries(grouped).reduce( - (acc: Record, [testId, runs]) => { - acc[testId] = - runs.sort( - (a, b) => new Date(b.updatedAt).getTime() - new Date(a.updatedAt).getTime(), - )[0] || null; - return acc; - }, - {}, - ); - }); - - const getFieldIssues = (testId: string) => fieldsIssues.value[testId] || []; - - // Methods - const setAllTestDefinitions = (definitions: TestDefinitionRecord[]) => { - testDefinitionsById.value = definitions.reduce( - (acc: Record, def: TestDefinitionRecord) => { - acc[def.id] = def; - return acc; - }, - {}, - ); - }; - - /** - * Upserts test definitions in the store. - * @param toUpsertDefinitions - An array of test definitions to upsert. 
- */ - const upsertTestDefinitions = (toUpsertDefinitions: TestDefinitionRecord[]) => { - toUpsertDefinitions.forEach((toUpsertDef) => { - const defId = toUpsertDef.id; - if (!defId) throw Error('ID is required for upserting'); - const currentDef = testDefinitionsById.value[defId]; - testDefinitionsById.value = { - ...testDefinitionsById.value, - [defId]: { - ...currentDef, - ...toUpsertDef, - }, - }; - }); - }; - - const deleteTestDefinition = (id: string) => { - const { [id]: deleted, ...rest } = testDefinitionsById.value; - testDefinitionsById.value = rest; - }; - - const fetchRunsForAllTests = async () => { - const testDefinitions = Object.values(testDefinitionsById.value); - try { - await Promise.all(testDefinitions.map(async (testDef) => await fetchTestRuns(testDef.id))); - } catch (error) { - console.error('Error fetching test runs:', error); - } - }; - - const fetchTestDefinition = async (id: string) => { - const testDefinition = await testDefinitionsApi.getTestDefinition( - rootStore.restApiContext, - id, - ); - testDefinitionsById.value[testDefinition.id] = testDefinition; - updateRunFieldIssues(id); - return testDefinition; - }; - - const fetchTestDefinitionsByWorkflowId = async (workflowId: string) => { - const testDefinitions = await testDefinitionsApi.getTestDefinitions( - rootStore.restApiContext, - { workflowId }, - ); - setAllTestDefinitions(testDefinitions.testDefinitions); - return testDefinitions.testDefinitions; - }; - - const fetchTestCaseExecutions = async (params: { testDefinitionId: string; runId: string }) => { - const testCaseExecutions = await testDefinitionsApi.getTestCaseExecutions( - rootStore.restApiContext, - params.testDefinitionId, - params.runId, - ); - - testCaseExecutions.forEach((testCaseExecution) => { - testCaseExecutionsById.value[testCaseExecution.id] = testCaseExecution; - }); - - return testCaseExecutions; - }; - - /** - * Fetches all test definitions from the API. 
- * @param {boolean} force - If true, fetches the definitions from the API even if they were already fetched before. - */ - const fetchAll = async (params?: { force?: boolean; workflowId?: string }) => { - const { force = false, workflowId } = params ?? {}; - if (!force && fetchedAll.value && !workflowId) { - const testDefinitions = Object.values(testDefinitionsById.value); - return { - count: testDefinitions.length, - testDefinitions, - }; - } - - loading.value = true; - try { - if (!workflowId) { - return; - } - - const retrievedDefinitions = await fetchTestDefinitionsByWorkflowId(workflowId); - fetchedAll.value = true; - - await Promise.all([ - tagsStore.fetchAll({ force: true, withUsageCount: true }), - fetchRunsForAllTests(), - ]); - return retrievedDefinitions; - } finally { - loading.value = false; - } - }; - - const fetchExampleEvaluationInput = async (testId: string, annotationTagId: string) => { - return await testDefinitionsApi.getExampleEvaluationInput( - rootStore.restApiContext, - testId, - annotationTagId, - ); - }; - - /** - * Creates a new test definition using the provided parameters. - * - * @param {Object} params - An object containing the necessary parameters to create a test definition. - * @param {string} params.name - The name of the new test definition. - * @param {string} params.workflowId - The ID of the workflow associated with the test definition. - * @returns {Promise} A promise that resolves to the newly created test definition. - * @throws {Error} Throws an error if there is a problem creating the test definition. 
- */ - const create = async (params: { name: string; workflowId: string }) => { - const createdDefinition = await testDefinitionsApi.createTestDefinition( - rootStore.restApiContext, - params, - ); - upsertTestDefinitions([createdDefinition]); - updateRunFieldIssues(createdDefinition.id); - return createdDefinition; - }; - - const update = async (params: Partial) => { - if (!params.id) throw new Error('ID is required to update a test definition'); - - const { id, ...updateParams } = params; - const updatedDefinition = await testDefinitionsApi.updateTestDefinition( - rootStore.restApiContext, - id, - updateParams, - ); - upsertTestDefinitions([updatedDefinition]); - updateRunFieldIssues(params.id); - return updatedDefinition; - }; - - /** - * Deletes a test definition by its ID. - * - * @param {number} id - The ID of the test definition to delete. - * @returns {Promise} A promise that resolves to true if the test definition was successfully deleted, false otherwise. - */ - const deleteById = async (id: string) => { - const result = await testDefinitionsApi.deleteTestDefinition(rootStore.restApiContext, id); - - if (result.success) { - deleteTestDefinition(id); - } - - return result.success; - }; - - // Test Runs Methods - const fetchTestRuns = async (testDefinitionId: string) => { - loading.value = true; - try { - const runs = await testDefinitionsApi.getTestRuns( - rootStore.restApiContext, - testDefinitionId, - ); - runs.forEach((run) => { - testRunsById.value[run.id] = run; - if (['running', 'new'].includes(run.status)) { - startPollingTestRun(testDefinitionId, run.id); - } - }); - return runs; - } finally { - loading.value = false; - } - }; - - const getTestRun = async (params: { testDefinitionId: string; runId: string }) => { - const run = await testDefinitionsApi.getTestRun(rootStore.restApiContext, params); - testRunsById.value[run.id] = run; - updateRunFieldIssues(params.testDefinitionId); - return run; - }; - - const startTestRun = async (testDefinitionId: 
string) => { - const result = await testDefinitionsApi.startTestRun( - rootStore.restApiContext, - testDefinitionId, - ); - return result; - }; - - const cancelTestRun = async (testDefinitionId: string, testRunId: string) => { - const result = await testDefinitionsApi.cancelTestRun( - rootStore.restApiContext, - testDefinitionId, - testRunId, - ); - return result; - }; - - const deleteTestRun = async (params: { testDefinitionId: string; runId: string }) => { - const result = await testDefinitionsApi.deleteTestRun(rootStore.restApiContext, params); - if (result.success) { - const { [params.runId]: deleted, ...rest } = testRunsById.value; - testRunsById.value = rest; - } - return result; - }; - - // TODO: This is a temporary solution to poll for test run status. - // We should use a more efficient polling mechanism in the future. - const startPollingTestRun = (testDefinitionId: string, runId: string) => { - const poll = async () => { - const run = await getTestRun({ testDefinitionId, runId }); - if (['running', 'new'].includes(run.status)) { - pollingTimeouts.value[runId] = setTimeout(poll, 1000); - } else { - delete pollingTimeouts.value[runId]; - } - }; - void poll(); - }; - - const cleanupPolling = () => { - Object.values(pollingTimeouts.value).forEach((timeout) => { - clearTimeout(timeout); - }); - pollingTimeouts.value = {}; - }; - - const updateRunFieldIssues = (testId: string) => { - const issues: FieldIssue[] = []; - const testDefinition = testDefinitionsById.value[testId]; - - if (!testDefinition) { - return; - } - - if (!testDefinition.annotationTagId) { - issues.push({ - field: 'tags', - message: locale.baseText('testDefinition.configError.noEvaluationTag'), - }); - } else { - const tagUsageCount = tagsStore.tagsById[testDefinition.annotationTagId]?.usageCount ?? 
0; - - if (tagUsageCount === 0) { - issues.push({ - field: 'tags', - message: locale.baseText('testDefinition.configError.noExecutionsAddedToTag'), - }); - } - } - - if (!testDefinition.evaluationWorkflowId) { - issues.push({ - field: 'evaluationWorkflow', - message: locale.baseText('testDefinition.configError.noEvaluationWorkflow'), - }); - } - - fieldsIssues.value = { - ...fieldsIssues.value, - [testId]: issues, - }; - return issues; - }; - - return { - // State - fetchedAll, - testDefinitionsById, - testRunsById, - testCaseExecutionsById, - - // Computed - allTestDefinitions, - allTestDefinitionsByWorkflowId, - isLoading, - hasTestDefinitions, - isFeatureEnabled, - testRunsByTestId, - lastRunByTestId, - - // Methods - fetchTestDefinition, - fetchTestDefinitionsByWorkflowId, - fetchTestCaseExecutions, - fetchAll, - fetchExampleEvaluationInput, - create, - update, - deleteById, - upsertTestDefinitions, - deleteTestDefinition, - fetchTestRuns, - getTestRun, - startTestRun, - cancelTestRun, - deleteTestRun, - cleanupPolling, - getFieldIssues, - updateRunFieldIssues, - }; - }, - {}, -); diff --git a/packages/frontend/editor-ui/src/stores/usage.store.ts b/packages/frontend/editor-ui/src/stores/usage.store.ts index 3021db61d7..b00e8255ac 100644 --- a/packages/frontend/editor-ui/src/stores/usage.store.ts +++ b/packages/frontend/editor-ui/src/stores/usage.store.ts @@ -23,6 +23,10 @@ const DEFAULT_STATE: UsageState = { value: 0, warningThreshold: 0.8, }, + workflowsHavingEvaluations: { + value: 0, + limit: 0, + }, }, license: { planId: '', @@ -41,6 +45,12 @@ export const useUsageStore = defineStore('usage', () => { const planId = computed(() => state.data.license.planId); const activeWorkflowTriggersLimit = computed(() => state.data.usage.activeWorkflowTriggers.limit); const activeWorkflowTriggersCount = computed(() => state.data.usage.activeWorkflowTriggers.value); + const workflowsWithEvaluationsLimit = computed( + () => 
state.data.usage.workflowsHavingEvaluations.limit, + ); + const workflowsWithEvaluationsCount = computed( + () => state.data.usage.workflowsHavingEvaluations.value, + ); const executionPercentage = computed( () => (activeWorkflowTriggersCount.value / activeWorkflowTriggersLimit.value) * 100, ); @@ -103,6 +113,8 @@ export const useUsageStore = defineStore('usage', () => { planId, activeWorkflowTriggersLimit, activeWorkflowTriggersCount, + workflowsWithEvaluationsLimit, + workflowsWithEvaluationsCount, executionPercentage, instanceId, managementToken, diff --git a/packages/frontend/editor-ui/src/stores/usage.test.ts b/packages/frontend/editor-ui/src/stores/usage.test.ts index def731e4af..5344e2ed9e 100644 --- a/packages/frontend/editor-ui/src/stores/usage.test.ts +++ b/packages/frontend/editor-ui/src/stores/usage.test.ts @@ -26,6 +26,10 @@ describe('Usage and plan store', () => { value, warningThreshold, }, + workflowsHavingEvaluations: { + value: 0, + limit: 0, + }, }, license: { planId: '', diff --git a/packages/frontend/editor-ui/src/views/Evaluations.ee/EvaluationsRootView.vue b/packages/frontend/editor-ui/src/views/Evaluations.ee/EvaluationsRootView.vue new file mode 100644 index 0000000000..40158f0351 --- /dev/null +++ b/packages/frontend/editor-ui/src/views/Evaluations.ee/EvaluationsRootView.vue @@ -0,0 +1,162 @@ + + + + + diff --git a/packages/frontend/editor-ui/src/views/Evaluations.ee/EvaluationsView.vue b/packages/frontend/editor-ui/src/views/Evaluations.ee/EvaluationsView.vue new file mode 100644 index 0000000000..5def050cfa --- /dev/null +++ b/packages/frontend/editor-ui/src/views/Evaluations.ee/EvaluationsView.vue @@ -0,0 +1,123 @@ + + + + + diff --git a/packages/frontend/editor-ui/src/views/Evaluations.ee/TestRunDetailView.vue b/packages/frontend/editor-ui/src/views/Evaluations.ee/TestRunDetailView.vue new file mode 100644 index 0000000000..0c0fce3a2e --- /dev/null +++ b/packages/frontend/editor-ui/src/views/Evaluations.ee/TestRunDetailView.vue @@ -0,0 
+1,413 @@ + + + + + diff --git a/packages/frontend/editor-ui/src/views/Evaluations.ee/tests/EvaluationsRootView.test.ts b/packages/frontend/editor-ui/src/views/Evaluations.ee/tests/EvaluationsRootView.test.ts new file mode 100644 index 0000000000..4c0d76ed5d --- /dev/null +++ b/packages/frontend/editor-ui/src/views/Evaluations.ee/tests/EvaluationsRootView.test.ts @@ -0,0 +1,94 @@ +import { describe, it, expect, beforeEach } from 'vitest'; +import { mock } from 'vitest-mock-extended'; +import { createTestingPinia } from '@pinia/testing'; +import { createComponentRenderer } from '@/__tests__/render'; +import EvaluationRootView from '../EvaluationsRootView.vue'; + +import { useWorkflowsStore } from '@/stores/workflows.store'; +import { useEvaluationStore } from '@/stores/evaluation.store.ee'; +import { mockedStore } from '@/__tests__/utils'; +import type { IWorkflowDb } from '@/Interface'; +import { waitFor } from '@testing-library/vue'; +import type { TestRunRecord } from '@/api/evaluation.ee'; +import { PLACEHOLDER_EMPTY_WORKFLOW_ID } from '@/constants'; + +describe('EvaluationsRootView', () => { + const renderComponent = createComponentRenderer(EvaluationRootView); + + const mockWorkflow: IWorkflowDb = { + id: 'different-id', + name: 'Test Workflow', + active: false, + isArchived: false, + createdAt: Date.now(), + updatedAt: Date.now(), + nodes: [], + connections: {}, + settings: { + executionOrder: 'v1', + }, + tags: [], + pinData: {}, + versionId: '', + usedCredentials: [], + }; + + const mockTestRuns: TestRunRecord[] = [mock({ workflowId: mockWorkflow.id })]; + + beforeEach(() => { + createTestingPinia(); + }); + + it('should initialize workflow on mount if not already initialized', async () => { + const workflowsStore = mockedStore(useWorkflowsStore); + const uninitializedWorkflow = { ...mockWorkflow, id: PLACEHOLDER_EMPTY_WORKFLOW_ID }; + workflowsStore.workflow = uninitializedWorkflow; + const newWorkflowId = 'workflow123'; + + renderComponent({ props: { 
name: newWorkflowId } }); + + // Wait for async operation to complete + await waitFor(() => expect(workflowsStore.fetchWorkflow).toHaveBeenCalledWith(newWorkflowId)); + }); + + it('should not initialize workflow if already loaded', async () => { + const workflowsStore = mockedStore(useWorkflowsStore); + workflowsStore.workflow = mockWorkflow; + + renderComponent({ props: { name: mockWorkflow.id } }); + + expect(workflowsStore.fetchWorkflow).not.toHaveBeenCalled(); + }); + + it('should load test data', async () => { + const evaluationStore = mockedStore(useEvaluationStore); + evaluationStore.fetchTestRuns.mockResolvedValue(mockTestRuns); + + renderComponent({ props: { name: mockWorkflow.id } }); + + await waitFor(() => + expect(evaluationStore.fetchTestRuns).toHaveBeenCalledWith(mockWorkflow.id), + ); + }); + + it('should not render setup wizard when there are test runs', async () => { + const workflowsStore = mockedStore(useWorkflowsStore); + workflowsStore.fetchWorkflow.mockResolvedValue(mockWorkflow); + const evaluationStore = mockedStore(useEvaluationStore); + evaluationStore.testRunsById = { foo: mock({ workflowId: mockWorkflow.id }) }; + + const { container } = renderComponent({ props: { name: mockWorkflow.id } }); + + // Check that setupContent is not present + await waitFor(() => expect(container.querySelector('.setupContent')).toBeFalsy()); + }); + + it('should render the setup wizard when there there are no test runs', async () => { + const workflowsStore = mockedStore(useWorkflowsStore); + workflowsStore.fetchWorkflow.mockResolvedValue(mockWorkflow); + + const { container } = renderComponent({ props: { name: mockWorkflow.id } }); + + await waitFor(() => expect(container.querySelector('.setupContent')).toBeTruthy()); + }); +}); diff --git a/packages/frontend/editor-ui/src/views/Evaluations.ee/tests/EvaluationsView.test.ts b/packages/frontend/editor-ui/src/views/Evaluations.ee/tests/EvaluationsView.test.ts new file mode 100644 index 0000000000..1d161fb0fb 
--- /dev/null +++ b/packages/frontend/editor-ui/src/views/Evaluations.ee/tests/EvaluationsView.test.ts @@ -0,0 +1,102 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { createTestingPinia } from '@pinia/testing'; +import { createComponentRenderer } from '@/__tests__/render'; +import EvaluationsView from '@/views/Evaluations.ee/EvaluationsView.vue'; + +import { cleanupAppModals, createAppModals, mockedStore } from '@/__tests__/utils'; +import { useEvaluationStore } from '@/stores/evaluation.store.ee'; +import userEvent from '@testing-library/user-event'; +import type { TestRunRecord } from '@/api/evaluation.ee'; +import { waitFor } from '@testing-library/vue'; +// import { useWorkflowsStore } from '@/stores/workflows.store'; + +vi.mock('vue-router', () => { + const push = vi.fn(); + const replace = vi.fn(); + const query = {}; + return { + useRouter: () => ({ + push, + replace, + }), + useRoute: () => ({ + query, + }), + RouterLink: { + template: '', + }, + }; +}); + +const renderComponent = createComponentRenderer(EvaluationsView, { + props: { name: 'workflow-id' }, +}); + +describe('EvaluationsView', () => { + const mockTestRuns: TestRunRecord[] = [ + { + id: 'run1', + workflowId: 'workflow-id', + status: 'completed', + runAt: '2023-01-01', + createdAt: '2023-01-01', + updatedAt: '2023-01-01', + completedAt: '2023-01-01', + metrics: { + some: 1, + }, + }, + ]; + + beforeEach(() => { + createTestingPinia(); + createAppModals(); + }); + + afterEach(() => { + vi.clearAllMocks(); + cleanupAppModals(); + }); + + describe('Test Runs functionality', () => { + it('should display test runs table when runs exist', async () => { + const evaluationStore = mockedStore(useEvaluationStore); + evaluationStore.testRunsById = { + [mockTestRuns[0].id]: mockTestRuns[0], + }; + + evaluationStore.fetchTestRuns.mockResolvedValue(mockTestRuns); + + const { getByTestId } = renderComponent(); + await waitFor(() => 
expect(getByTestId('past-runs-table')).toBeInTheDocument()); + // expect(getByTestId('past-runs-table')).toBeInTheDocument(); + }); + + it('should start a test run when run test button is clicked', async () => { + const evaluationStore = mockedStore(useEvaluationStore); + evaluationStore.testRunsById = { + run1: { + id: 'run1', + workflowId: 'workflow-id', + status: 'completed', + runAt: '2023-01-01', + createdAt: '2023-01-01', + updatedAt: '2023-01-01', + completedAt: '2023-01-01', + metrics: { + some: 1, + }, + }, + }; + + const { getByTestId } = renderComponent(); + + await waitFor(() => expect(getByTestId('run-test-button')).toBeInTheDocument()); + + await userEvent.click(getByTestId('run-test-button')); + + expect(evaluationStore.startTestRun).toHaveBeenCalledWith('workflow-id'); + expect(evaluationStore.fetchTestRuns).toHaveBeenCalledWith('workflow-id'); + }); + }); +}); diff --git a/packages/frontend/editor-ui/src/views/NodeView.vue b/packages/frontend/editor-ui/src/views/NodeView.vue index ed7c3627f4..9d7c4ea37e 100644 --- a/packages/frontend/editor-ui/src/views/NodeView.vue +++ b/packages/frontend/editor-ui/src/views/NodeView.vue @@ -70,7 +70,12 @@ import { import { useSourceControlStore } from '@/stores/sourceControl.store'; import { useNodeCreatorStore } from '@/stores/nodeCreator.store'; import { useExternalHooks } from '@/composables/useExternalHooks'; -import { NodeConnectionTypes, jsonParse } from 'n8n-workflow'; +import { + NodeConnectionTypes, + jsonParse, + EVALUATION_TRIGGER_NODE_TYPE, + EVALUATION_NODE_TYPE, +} from 'n8n-workflow'; import type { NodeConnectionType, IDataObject, ExecutionSummary, IConnection } from 'n8n-workflow'; import { useToast } from '@/composables/useToast'; import { useSettingsStore } from '@/stores/settings.store'; @@ -327,6 +332,22 @@ async function initializeRoute(force = false) { return; } + // Open node panel if the route has a corresponding action + if (route.query.action === 'addEvaluationTrigger') { + 
nodeCreatorStore.openNodeCreatorForTriggerNodes( + NODE_CREATOR_OPEN_SOURCES.ADD_EVALUATION_TRIGGER_BUTTON, + ); + } else if (route.query.action === 'addEvaluationNode') { + nodeCreatorStore.openNodeCreatorForActions( + EVALUATION_NODE_TYPE, + NODE_CREATOR_OPEN_SOURCES.ADD_EVALUATION_NODE_BUTTON, + ); + } else if (route.query.action === 'executeEvaluation') { + if (evaluationTriggerNode.value) { + void runEntireWorkflow('node', evaluationTriggerNode.value.name); + } + } + const isAlreadyInitialized = !force && initializedWorkflowId.value && @@ -1361,6 +1382,13 @@ function onOpenChat() { startChat('main'); } +/** + * Evaluation + */ +const evaluationTriggerNode = computed(() => { + return editableWorkflow.value.nodes.find((node) => node.type === EVALUATION_TRIGGER_NODE_TYPE); +}); + /** * History events */ diff --git a/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionEditView.vue b/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionEditView.vue deleted file mode 100644 index 3e7c16db69..0000000000 --- a/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionEditView.vue +++ /dev/null @@ -1,296 +0,0 @@ - - - - - diff --git a/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionListView.vue b/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionListView.vue deleted file mode 100644 index da481a5b68..0000000000 --- a/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionListView.vue +++ /dev/null @@ -1,278 +0,0 @@ - - - - diff --git a/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionNewView.vue b/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionNewView.vue deleted file mode 100644 index ba91fe145b..0000000000 --- a/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionNewView.vue +++ /dev/null @@ -1,87 +0,0 @@ - - - diff --git a/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionRootView.vue 
b/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionRootView.vue deleted file mode 100644 index c2623433f2..0000000000 --- a/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionRootView.vue +++ /dev/null @@ -1,37 +0,0 @@ - - - - - diff --git a/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionRunDetailView.vue b/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionRunDetailView.vue deleted file mode 100644 index 34e7ad174a..0000000000 --- a/packages/frontend/editor-ui/src/views/TestDefinition/TestDefinitionRunDetailView.vue +++ /dev/null @@ -1,409 +0,0 @@ - - - - - diff --git a/packages/frontend/editor-ui/src/views/TestDefinition/tests/TestDefinitionEditView.test.ts b/packages/frontend/editor-ui/src/views/TestDefinition/tests/TestDefinitionEditView.test.ts deleted file mode 100644 index ddb32e0c57..0000000000 --- a/packages/frontend/editor-ui/src/views/TestDefinition/tests/TestDefinitionEditView.test.ts +++ /dev/null @@ -1,110 +0,0 @@ -import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; -import { createTestingPinia } from '@pinia/testing'; -import { createComponentRenderer } from '@/__tests__/render'; -import TestDefinitionEditView from '@/views/TestDefinition/TestDefinitionEditView.vue'; -import type { useTestDefinitionForm } from '@/components/TestDefinition/composables/useTestDefinitionForm'; -import { ref } from 'vue'; -import { cleanupAppModals, createAppModals, mockedStore } from '@/__tests__/utils'; -import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee'; -import userEvent from '@testing-library/user-event'; - -const form: Partial> = { - state: ref({ - name: { value: '', isEditing: false, tempValue: '' }, - description: { value: '', isEditing: false, tempValue: '' }, - tags: { value: [], tempValue: [], isEditing: false }, - evaluationWorkflow: { mode: 'list', value: '', __rl: true }, - mockedNodes: [], - }), - loadTestData: vi.fn(), - cancelEditing: 
vi.fn(), - updateTest: vi.fn(), - startEditing: vi.fn(), - saveChanges: vi.fn(), - createTest: vi.fn(), -}; -vi.mock('@/components/TestDefinition/composables/useTestDefinitionForm', () => ({ - useTestDefinitionForm: () => form, -})); - -const renderComponent = createComponentRenderer(TestDefinitionEditView, { - props: { testId: '1', name: 'workflow-name' }, -}); - -describe('TestDefinitionEditView', () => { - beforeEach(() => { - createTestingPinia(); - createAppModals(); - }); - - afterEach(() => { - vi.clearAllMocks(); - cleanupAppModals(); - }); - - it('should load test data', async () => { - renderComponent(); - expect(form.loadTestData).toHaveBeenCalledWith('1', 'workflow-name'); - }); - - it('should display disabled "run test" button when editing test without tags', async () => { - const testDefinitionStore = mockedStore(useTestDefinitionStore); - - testDefinitionStore.getFieldIssues.mockReturnValueOnce([ - { field: 'tags', message: 'Tag is required' }, - ]); - - const { getByTestId } = renderComponent(); - - const updateButton = getByTestId('run-test-button'); - expect(updateButton.textContent?.toLowerCase()).toContain('run test'); - expect(updateButton).toHaveClass('disabled'); - }); - - it('should apply "has-issues" class to inputs with issues', async () => { - const testDefinitionStore = mockedStore(useTestDefinitionStore); - - testDefinitionStore.getFieldIssues.mockReturnValueOnce([ - { field: 'evaluationWorkflow', message: 'No evaluation workflow set' }, - ]); - - const { container } = renderComponent(); - const issueElements = container.querySelectorAll('.has-issues'); - expect(issueElements.length).toBeGreaterThan(0); - }); - - describe('Test Runs functionality', () => { - it('should display test runs table when runs exist', async () => { - const testDefinitionStore = mockedStore(useTestDefinitionStore); - testDefinitionStore.testRunsById = { - run1: { - id: 'run1', - testDefinitionId: '1', - status: 'completed', - runAt: '2023-01-01', - createdAt: 
'2023-01-01', - updatedAt: '2023-01-01', - completedAt: '2023-01-01', - }, - }; - - const { getByTestId } = renderComponent(); - expect(getByTestId('past-runs-table')).toBeInTheDocument(); - }); - - it('should not display test runs table when no runs exist', async () => { - const { queryByTestId } = renderComponent(); - expect(queryByTestId('past-runs-table')).not.toBeInTheDocument(); - }); - - it('should start a test run when run test button is clicked', async () => { - const testDefinitionStore = mockedStore(useTestDefinitionStore); - const { getByTestId } = renderComponent(); - - await userEvent.click(getByTestId('run-test-button')); - - expect(testDefinitionStore.startTestRun).toHaveBeenCalledWith('1'); - expect(testDefinitionStore.fetchTestRuns).toHaveBeenCalledWith('1'); - }); - }); -}); diff --git a/packages/frontend/editor-ui/src/views/TestDefinition/tests/TestDefinitionListView.test.ts b/packages/frontend/editor-ui/src/views/TestDefinition/tests/TestDefinitionListView.test.ts deleted file mode 100644 index 82220687a8..0000000000 --- a/packages/frontend/editor-ui/src/views/TestDefinition/tests/TestDefinitionListView.test.ts +++ /dev/null @@ -1,184 +0,0 @@ -import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; -import { createTestingPinia } from '@pinia/testing'; -import { createComponentRenderer } from '@/__tests__/render'; -import TestDefinitionListView from '@/views/TestDefinition/TestDefinitionListView.vue'; -import type { useToast } from '@/composables/useToast'; -import type { useMessage } from '@/composables/useMessage'; -import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee'; -import { mockedStore } from '@/__tests__/utils'; -import { MODAL_CONFIRM } from '@/constants'; -import type { TestDefinitionRecord } from '@/api/testDefinition.ee'; -import userEvent from '@testing-library/user-event'; -import { within, waitFor } from '@testing-library/dom'; - -const renderComponent = 
createComponentRenderer(TestDefinitionListView); - -const workflowId = 'workflow1'; -const mockTestDefinitions: TestDefinitionRecord[] = [ - { - id: '1', - name: 'Test 1', - workflowId, - updatedAt: '2023-01-01T00:00:00.000Z', - createdAt: '2023-01-01T00:00:00.000Z', - annotationTagId: 'tag1', - }, - { - id: '2', - name: 'Test 2', - workflowId, - updatedAt: '2023-01-02T00:00:00.000Z', - createdAt: '2023-01-01T00:00:00.000Z', - }, - { - id: '3', - name: 'Test 3', - workflowId, - updatedAt: '2023-01-03T00:00:00.000Z', - createdAt: '2023-01-01T00:00:00.000Z', - }, -]; - -const toast = vi.hoisted( - () => - ({ - showMessage: vi.fn(), - showError: vi.fn(), - }) satisfies Partial>, -); - -vi.mock('@/composables/useToast', () => ({ - useToast: () => toast, -})); - -const message = vi.hoisted( - () => - ({ - confirm: vi.fn(), - }) satisfies Partial>, -); - -vi.mock('@/composables/useMessage', () => ({ - useMessage: () => message, -})); - -describe('TestDefinitionListView', () => { - beforeEach(() => { - createTestingPinia(); - }); - - afterEach(() => { - vi.clearAllMocks(); - }); - - it('should render loader', async () => { - const { getByTestId } = renderComponent({ props: { name: 'any' } }); - expect(getByTestId('test-definition-loader')).toBeTruthy(); - }); - - it('should render empty state when no tests exist', async () => { - const testDefinitionStore = mockedStore(useTestDefinitionStore); - testDefinitionStore.allTestDefinitionsByWorkflowId = {}; - - const { getByTestId } = renderComponent({ props: { name: 'any' } }); - await waitFor(() => expect(getByTestId('test-definition-empty-state')).toBeTruthy()); - }); - - it('should render tests list when tests exist', async () => { - const testDefinitionStore = mockedStore(useTestDefinitionStore); - testDefinitionStore.allTestDefinitionsByWorkflowId[workflowId] = mockTestDefinitions; - - const { getByTestId } = renderComponent({ props: { name: workflowId } }); - - await waitFor(() => 
expect(getByTestId('test-definition-list')).toBeTruthy()); - }); - - it('should load initial base on route param', async () => { - const testDefinitionStore = mockedStore(useTestDefinitionStore); - renderComponent({ props: { name: workflowId } }); - expect(testDefinitionStore.fetchAll).toHaveBeenCalledWith({ workflowId }); - }); - - it('should start test run and show success message', async () => { - const testDefinitionStore = mockedStore(useTestDefinitionStore); - testDefinitionStore.allTestDefinitionsByWorkflowId[workflowId] = mockTestDefinitions; - testDefinitionStore.startTestRun.mockResolvedValueOnce({ success: true }); - - const { getByTestId } = renderComponent({ props: { name: workflowId } }); - - await waitFor(() => expect(getByTestId('test-definition-list')).toBeTruthy()); - - const testToRun = mockTestDefinitions[0].id; - await userEvent.click(getByTestId(`run-test-${testToRun}`)); - - expect(testDefinitionStore.startTestRun).toHaveBeenCalledWith(testToRun); - expect(toast.showMessage).toHaveBeenCalledWith(expect.objectContaining({ type: 'success' })); - expect(testDefinitionStore.fetchTestRuns).toHaveBeenCalledWith(testToRun); - }); - - it('should show error message on failed test run', async () => { - const testDefinitionStore = mockedStore(useTestDefinitionStore); - testDefinitionStore.allTestDefinitionsByWorkflowId[workflowId] = mockTestDefinitions; - testDefinitionStore.startTestRun.mockRejectedValueOnce(new Error('Run failed')); - - const { getByTestId } = renderComponent({ props: { name: workflowId } }); - - await waitFor(() => expect(getByTestId('test-definition-list')).toBeTruthy()); - - const testToRun = mockTestDefinitions[0].id; - await userEvent.click(getByTestId(`run-test-${testToRun}`)); - - expect(toast.showError).toHaveBeenCalledWith(expect.any(Error), expect.any(String)); - }); - - it('should delete test and show success message', async () => { - const testDefinitionStore = mockedStore(useTestDefinitionStore); - 
testDefinitionStore.allTestDefinitionsByWorkflowId[workflowId] = mockTestDefinitions; - - message.confirm.mockResolvedValueOnce(MODAL_CONFIRM); - - const { getByTestId, queryByTestId } = renderComponent({ - props: { name: workflowId }, - }); - - await waitFor(() => expect(getByTestId('test-definition-list')).toBeTruthy()); - - const testToDelete = mockTestDefinitions[0].id; - - const trigger = getByTestId(`test-actions-${testToDelete}`); - await userEvent.click(trigger); - - const dropdownId = within(trigger).getByRole('button').getAttribute('aria-controls'); - const dropdown = document.querySelector(`#${dropdownId}`); - expect(dropdown).toBeInTheDocument(); - - await userEvent.click(await within(dropdown as HTMLElement).findByText('Delete')); - - expect(testDefinitionStore.deleteById).toHaveBeenCalledWith(testToDelete); - expect(toast.showMessage).toHaveBeenCalledWith(expect.objectContaining({ type: 'success' })); - - /** - * since the actions are mocked by default, - * double check the UI updates correctly - */ - testDefinitionStore.allTestDefinitionsByWorkflowId = { - [workflowId]: [mockTestDefinitions[1], mockTestDefinitions[2]], - }; - await waitFor(() => - expect(queryByTestId(`test-actions-${testToDelete}`)).not.toBeInTheDocument(), - ); - }); - - it('should sort tests by updated date in descending order', async () => { - const testDefinitionStore = mockedStore(useTestDefinitionStore); - testDefinitionStore.allTestDefinitionsByWorkflowId[workflowId] = mockTestDefinitions; - - const { container, getByTestId } = renderComponent({ props: { name: workflowId } }); - await waitFor(() => expect(getByTestId('test-definition-list')).toBeTruthy()); - - const testItems = container.querySelectorAll('[data-test-id^="test-item-"]'); - expect(testItems[0].getAttribute('data-test-id')).toBe('test-item-3'); - expect(testItems[1].getAttribute('data-test-id')).toBe('test-item-2'); - expect(testItems[2].getAttribute('data-test-id')).toBe('test-item-1'); - }); -}); diff --git 
a/packages/frontend/editor-ui/src/views/TestDefinition/tests/TestDefinitionNewView.test.ts b/packages/frontend/editor-ui/src/views/TestDefinition/tests/TestDefinitionNewView.test.ts deleted file mode 100644 index ada4e7a677..0000000000 --- a/packages/frontend/editor-ui/src/views/TestDefinition/tests/TestDefinitionNewView.test.ts +++ /dev/null @@ -1,88 +0,0 @@ -import { describe, it, expect, vi, beforeEach, afterEach, type Mock } from 'vitest'; -import { createTestingPinia } from '@pinia/testing'; -import { createComponentRenderer } from '@/__tests__/render'; -import TestDefinitionNewView from '@/views/TestDefinition/TestDefinitionNewView.vue'; -import { ref } from 'vue'; -import { mockedStore } from '@/__tests__/utils'; -import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee'; -import { useAnnotationTagsStore } from '@/stores/tags.store'; -import { useRoute } from 'vue-router'; -import { useExecutionsStore } from '@/stores/executions.store'; -import { waitFor } from '@testing-library/vue'; - -const workflowId = 'workflow_id'; -const testId = 'test_id'; - -const mockedForm = { - state: ref({ tags: { value: [] }, name }), - createTest: vi.fn().mockResolvedValue({ - id: testId, - name: 'test_name', - workflowId, - createdAt: '', - }), - updateTest: vi.fn().mockResolvedValue({}), -}; -vi.mock('@/components/TestDefinition/composables/useTestDefinitionForm', () => ({ - useTestDefinitionForm: vi.fn().mockImplementation(() => mockedForm), -})); - -const mockReplace = vi.fn(); -vi.mock('vue-router', async (importOriginal) => ({ - // eslint-disable-next-line @typescript-eslint/consistent-type-imports - ...(await importOriginal()), - useRoute: vi.fn().mockReturnValue({}), - useRouter: vi.fn(() => ({ - replace: mockReplace, - })), -})); - -describe('TestDefinitionRootView', () => { - const renderComponent = createComponentRenderer(TestDefinitionNewView); - - beforeEach(() => { - createTestingPinia(); - }); - - afterEach(() => { - vi.clearAllMocks(); - }); - - 
it('should create a test adn redirect', async () => { - const testDefinitionStore = mockedStore(useTestDefinitionStore); - const annotationTagsStore = mockedStore(useAnnotationTagsStore); - - annotationTagsStore.create.mockResolvedValueOnce({ id: 'tag_id', name: 'tag_name' }); - renderComponent({ props: { name: workflowId } }); - - expect(mockedForm.createTest).toHaveBeenCalledWith(workflowId); - await waitFor(() => - expect(testDefinitionStore.updateRunFieldIssues).toHaveBeenCalledWith(testId), - ); - - expect(mockReplace).toHaveBeenCalledWith( - expect.objectContaining({ - params: { - testId, - }, - }), - ); - }); - - it('should assign an execution to a test', async () => { - (useRoute as Mock).mockReturnValue({ - query: { executionId: 'execution_id', annotationTags: ['2', '3'] }, - }); - const annotationTagsStore = mockedStore(useAnnotationTagsStore); - const executionsStore = mockedStore(useExecutionsStore); - - annotationTagsStore.create.mockResolvedValueOnce({ id: 'tag_id', name: 'tag_name' }); - renderComponent({ props: { name: workflowId } }); - - await waitFor(() => - expect(executionsStore.annotateExecution).toHaveBeenCalledWith('execution_id', { - tags: ['2', '3', 'tag_id'], - }), - ); - }); -}); diff --git a/packages/frontend/editor-ui/src/views/TestDefinition/tests/TestDefinitionRootView.test.ts b/packages/frontend/editor-ui/src/views/TestDefinition/tests/TestDefinitionRootView.test.ts deleted file mode 100644 index 8e8738ec78..0000000000 --- a/packages/frontend/editor-ui/src/views/TestDefinition/tests/TestDefinitionRootView.test.ts +++ /dev/null @@ -1,63 +0,0 @@ -import { describe, it, expect, beforeEach } from 'vitest'; -import { createTestingPinia } from '@pinia/testing'; -import { createComponentRenderer } from '@/__tests__/render'; -import TestDefinitionRootView from '../TestDefinitionRootView.vue'; - -import { useWorkflowsStore } from '@/stores/workflows.store'; -import { mockedStore } from '@/__tests__/utils'; -import type { IWorkflowDb } from 
'@/Interface'; - -import { waitFor } from '@testing-library/vue'; - -describe('TestDefinitionRootView', () => { - const renderComponent = createComponentRenderer(TestDefinitionRootView); - - const mockWorkflow: IWorkflowDb = { - id: 'different-id', - name: 'Test Workflow', - active: false, - isArchived: false, - createdAt: Date.now(), - updatedAt: Date.now(), - nodes: [], - connections: {}, - settings: { - executionOrder: 'v1', - }, - tags: [], - pinData: {}, - versionId: '', - usedCredentials: [], - }; - - beforeEach(() => { - createTestingPinia(); - }); - - it('should initialize workflow on mount if not already initialized', async () => { - const workflowsStore = mockedStore(useWorkflowsStore); - workflowsStore.workflow = mockWorkflow; - const newWorkflowId = 'workflow123'; - - renderComponent({ props: { name: newWorkflowId } }); - - expect(workflowsStore.fetchWorkflow).toHaveBeenCalledWith(newWorkflowId); - }); - - it('should not initialize workflow if already loaded', async () => { - const workflowsStore = mockedStore(useWorkflowsStore); - workflowsStore.workflow = mockWorkflow; - - renderComponent({ props: { name: mockWorkflow.id } }); - - expect(workflowsStore.fetchWorkflow).not.toHaveBeenCalled(); - }); - - it('should render router view', async () => { - const workflowsStore = mockedStore(useWorkflowsStore); - workflowsStore.fetchWorkflow.mockResolvedValue(mockWorkflow); - const { container } = renderComponent({ props: { name: mockWorkflow.id } }); - - await waitFor(() => expect(container.querySelector('router-view')).toBeTruthy()); - }); -});