feat(editor): Evaluations frontend (no-changelog) (#15550)

Co-authored-by: Yiorgis Gozadinos <yiorgis@n8n.io>
Co-authored-by: JP van Oosten <jp@n8n.io>
Co-authored-by: Giulio Andreini <g.andreini@gmail.com>
Co-authored-by: Michael Kret <michael.k@radency.com>
Author: Eugene
Date: 2025-05-26 12:26:28 +02:00
Committed by: GitHub
Parent: 3ee15a8331
Commit: ca8f087a47
87 changed files with 3460 additions and 5103 deletions

View File

@@ -195,4 +195,7 @@ export interface FrontendSettings {
dashboard: boolean;
dateRanges: InsightsDateRange[];
};
evaluation: {
quota: number;
};
}
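
The quota is exposed to the client through the regular settings payload, so the editor can gate the evaluations UI without an extra request. A minimal reading sketch, assuming the usual frontend settings store wiring (the store name is an assumption, not part of this diff):

// Sketch only: read the licensed quota from the settings payload.
const evaluationsQuota: number = settingsStore.settings.evaluation.quota;
// -1 follows the license convention for "unlimited".
const unlimited = evaluationsQuota === -1;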

View File

@@ -189,4 +189,8 @@ export class LicenseState {
getMaxTeamProjects() {
return this.getValue('quota:maxTeamProjects') ?? 0;
}
getMaxWorkflowsWithEvaluations() {
return this.getValue('quota:evaluations:maxWorkflows') ?? 0;
}
}

View File

@@ -39,6 +39,7 @@ export const LICENSE_QUOTAS = {
INSIGHTS_MAX_HISTORY_DAYS: 'quota:insights:maxHistoryDays',
INSIGHTS_RETENTION_MAX_AGE_DAYS: 'quota:insights:retention:maxAgeDays',
INSIGHTS_RETENTION_PRUNE_INTERVAL_DAYS: 'quota:insights:retention:pruneIntervalDays',
WORKFLOWS_WITH_EVALUATION_LIMIT: 'quota:evaluations:maxWorkflows',
} as const;
export const UNLIMITED_LICENSE_QUOTA = -1;
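
As with the other quotas, -1 (UNLIMITED_LICENSE_QUOTA) means unlimited, and getMaxWorkflowsWithEvaluations falls back to 0 when no entitlement is present. A hedged sketch of the check a consumer would perform (licenseState and currentCount are assumed inputs):

// Sketch, not part of the commit: true while another workflow may still run evaluations.
function underEvaluationsQuota(licenseState: LicenseState, currentCount: number): boolean {
  const limit = licenseState.getMaxWorkflowsWithEvaluations();
  return limit === UNLIMITED_LICENSE_QUOTA || currentCount < limit;
}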

View File

@@ -286,9 +286,17 @@ export type TestRunErrorCode =
| 'TEST_CASES_NOT_FOUND'
| 'INTERRUPTED'
| 'UNKNOWN_ERROR'
| 'EVALUATION_TRIGGER_NOT_FOUND';
| 'EVALUATION_TRIGGER_NOT_FOUND'
| 'EVALUATION_TRIGGER_NOT_CONFIGURED'
| 'EVALUATION_TRIGGER_DISABLED'
| 'SET_OUTPUTS_NODE_NOT_FOUND'
| 'SET_OUTPUTS_NODE_NOT_CONFIGURED'
| 'SET_METRICS_NODE_NOT_FOUND'
| 'SET_METRICS_NODE_NOT_CONFIGURED'
| 'CANT_FETCH_TEST_CASES';
export type TestCaseExecutionErrorCode =
| 'NO_METRICS_COLLECTED'
| 'MOCKED_NODE_NOT_FOUND' // This will be used when node mocking is implemented
| 'FAILED_TO_EXECUTE_WORKFLOW'
| 'INVALID_METRICS'

View File

@@ -33,12 +33,14 @@ export class LicenseMetricsRepository extends Repository<LicenseMetrics> {
production_executions_count: string | number;
production_root_executions_count: string | number;
manual_executions_count: string | number;
evaluations_count: string | number;
};
const userTable = this.toTableName('user');
const workflowTable = this.toTableName('workflow_entity');
const credentialTable = this.toTableName('credentials_entity');
const workflowStatsTable = this.toTableName('workflow_statistics');
const testRunTable = this.toTableName('test_run');
const [
{
@@ -50,6 +52,7 @@ export class LicenseMetricsRepository extends Repository<LicenseMetrics> {
production_executions_count: productionExecutions,
production_root_executions_count: productionRootExecutions,
manual_executions_count: manualExecutions,
evaluations_count: evaluations,
},
] = (await this.query(`
SELECT
@@ -60,7 +63,8 @@ export class LicenseMetricsRepository extends Repository<LicenseMetrics> {
(SELECT COUNT(*) FROM ${credentialTable}) AS total_credentials_count,
(SELECT SUM(count) FROM ${workflowStatsTable} WHERE name IN ('production_success', 'production_error')) AS production_executions_count,
(SELECT SUM(${this.toColumnName('rootCount')}) FROM ${workflowStatsTable} WHERE name IN ('production_success', 'production_error')) AS production_root_executions_count,
(SELECT SUM(count) FROM ${workflowStatsTable} WHERE name IN ('manual_success', 'manual_error')) AS manual_executions_count;
(SELECT SUM(count) FROM ${workflowStatsTable} WHERE name IN ('manual_success', 'manual_error')) AS manual_executions_count,
(SELECT COUNT(distinct ${this.toColumnName('workflowId')}) FROM ${testRunTable}) AS evaluations_count;
`)) as Row[];
const toNumber = (value: string | number) =>
@@ -75,6 +79,7 @@ export class LicenseMetricsRepository extends Repository<LicenseMetrics> {
productionExecutions: toNumber(productionExecutions),
productionRootExecutions: toNumber(productionRootExecutions),
manualExecutions: toNumber(manualExecutions),
evaluations: toNumber(evaluations),
};
}
}

View File

@@ -128,6 +128,16 @@ export class WorkflowRepository extends Repository<WorkflowEntity> {
.execute();
}
async getWorkflowsWithEvaluationCount() {
// Count workflows having test runs
const totalWorkflowCount = await this.createQueryBuilder('workflow')
.innerJoin('workflow.testRuns', 'testrun')
.distinct(true)
.getCount();
return totalWorkflowCount ?? 0;
}
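// For reference, the builder above compiles to roughly the following SQL
// (a sketch; exact aliasing and quoting depend on TypeORM and the driver):
//   SELECT COUNT(DISTINCT workflow.id)
//   FROM workflow_entity workflow
//   INNER JOIN test_run testrun ON testrun."workflowId" = workflow.id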
private buildBaseUnionQuery(workflowIds: string[], options: ListQuery.Options = {}) {
const subQueryParameters: ListQuery.Options = {
select: {

View File

@@ -157,6 +157,3 @@ export const WsStatusCodes = {
} as const;
export const FREE_AI_CREDITS_CREDENTIAL_NAME = 'n8n free OpenAI API credits';
export const EVALUATION_NODE = `${NODE_PACKAGE_PREFIX}base.evaluation`;
export const EVALUATION_DATASET_TRIGGER_NODE = `${NODE_PACKAGE_PREFIX}base.evaluationTrigger`;

View File

@@ -117,6 +117,7 @@ export class E2EController {
[LICENSE_QUOTAS.INSIGHTS_MAX_HISTORY_DAYS]: 7,
[LICENSE_QUOTAS.INSIGHTS_RETENTION_MAX_AGE_DAYS]: 30,
[LICENSE_QUOTAS.INSIGHTS_RETENTION_PRUNE_INTERVAL_DAYS]: 180,
[LICENSE_QUOTAS.WORKFLOWS_WITH_EVALUATION_LIMIT]: 1,
};
private numericFeatures: Record<NumericLicenseFeature, number> = {
@@ -137,6 +138,8 @@ export class E2EController {
E2EController.numericFeaturesDefaults[LICENSE_QUOTAS.INSIGHTS_RETENTION_MAX_AGE_DAYS],
[LICENSE_QUOTAS.INSIGHTS_RETENTION_PRUNE_INTERVAL_DAYS]:
E2EController.numericFeaturesDefaults[LICENSE_QUOTAS.INSIGHTS_RETENTION_PRUNE_INTERVAL_DAYS],
[LICENSE_QUOTAS.WORKFLOWS_WITH_EVALUATION_LIMIT]:
E2EController.numericFeaturesDefaults[LICENSE_QUOTAS.WORKFLOWS_WITH_EVALUATION_LIMIT],
};
constructor(

View File

@@ -5,12 +5,12 @@ import type { WorkflowRepository } from '@n8n/db';
import { readFileSync } from 'fs';
import { mock } from 'jest-mock-extended';
import type { ErrorReporter } from 'n8n-core';
import { EVALUATION_NODE_TYPE, EVALUATION_TRIGGER_NODE_TYPE } from 'n8n-workflow';
import type { IWorkflowBase } from 'n8n-workflow';
import type { IRun } from 'n8n-workflow';
import type { IRun, ExecutionError } from 'n8n-workflow';
import path from 'path';
import type { ActiveExecutions } from '@/active-executions';
import { EVALUATION_DATASET_TRIGGER_NODE } from '@/constants';
import { TestRunError } from '@/evaluation.ee/test-runner/errors.ee';
import { LoadNodesAndCredentials } from '@/load-nodes-and-credentials';
import type { Telemetry } from '@/telemetry';
@@ -59,7 +59,7 @@ describe('TestRunnerService', () => {
jest.resetAllMocks();
});
describe('findTriggerNode', () => {
describe('findEvaluationTriggerNode', () => {
test('should find the trigger node in a workflow', () => {
// Setup a test workflow with a trigger node
const workflowWithTrigger = mock<IWorkflowBase>({
@@ -67,7 +67,7 @@ describe('TestRunnerService', () => {
{
id: 'node1',
name: 'Dataset Trigger',
type: EVALUATION_DATASET_TRIGGER_NODE,
type: EVALUATION_TRIGGER_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {},
@@ -85,11 +85,11 @@ describe('TestRunnerService', () => {
});
// Use the protected method via any type casting
const result = (testRunnerService as any).findTriggerNode(workflowWithTrigger);
const result = (testRunnerService as any).findEvaluationTriggerNode(workflowWithTrigger);
// Assert the result is the correct node
expect(result).toBeDefined();
expect(result.type).toBe(EVALUATION_DATASET_TRIGGER_NODE);
expect(result.type).toBe(EVALUATION_TRIGGER_NODE_TYPE);
expect(result.name).toBe('Dataset Trigger');
});
@@ -118,16 +118,16 @@ describe('TestRunnerService', () => {
});
// Call the function and expect undefined result
const result = (testRunnerService as any).findTriggerNode(workflowWithoutTrigger);
const result = (testRunnerService as any).findEvaluationTriggerNode(workflowWithoutTrigger);
expect(result).toBeUndefined();
});
test('should work with the actual workflow.under-test.json', () => {
const result = (testRunnerService as any).findTriggerNode(wfUnderTestJson);
const result = (testRunnerService as any).findEvaluationTriggerNode(wfUnderTestJson);
// Assert the result is the correct node
expect(result).toBeDefined();
expect(result.type).toBe(EVALUATION_DATASET_TRIGGER_NODE);
expect(result.type).toBe(EVALUATION_TRIGGER_NODE_TYPE);
expect(result.name).toBe('When fetching a dataset row');
});
});
@@ -140,7 +140,7 @@ describe('TestRunnerService', () => {
{
id: 'triggerNodeId',
name: 'TriggerNode',
type: EVALUATION_DATASET_TRIGGER_NODE,
type: EVALUATION_TRIGGER_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {},
@@ -164,6 +164,7 @@ describe('TestRunnerService', () => {
data: {
main: [mockOutputItems],
},
error: undefined,
},
],
},
@@ -185,7 +186,7 @@ describe('TestRunnerService', () => {
{
id: 'triggerNodeId',
name: 'TriggerNode',
type: EVALUATION_DATASET_TRIGGER_NODE,
type: EVALUATION_TRIGGER_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {},
@@ -217,6 +218,51 @@ describe('TestRunnerService', () => {
}
});
test('should throw an error if evaluation trigger could not fetch data', () => {
// Create workflow with a trigger node
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'triggerNodeId',
name: 'TriggerNode',
type: EVALUATION_TRIGGER_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {},
},
],
connections: {},
});
// Create execution data with missing output
const execution = mock<IRun>({
data: {
resultData: {
runData: {
TriggerNode: [
{
error: mock<ExecutionError>(),
},
],
},
},
},
});
// Expect the method to throw an error
expect(() => {
(testRunnerService as any).extractDatasetTriggerOutput(execution, workflow);
}).toThrow(TestRunError);
// Verify the error has the correct code
try {
(testRunnerService as any).extractDatasetTriggerOutput(execution, workflow);
} catch (error) {
expect(error).toBeInstanceOf(TestRunError);
expect(error.code).toBe('CANT_FETCH_TEST_CASES');
}
});
test('should throw an error if trigger node output is empty list', () => {
// Create workflow with a trigger node
const workflow = mock<IWorkflowBase>({
@@ -224,7 +270,7 @@ describe('TestRunnerService', () => {
{
id: 'triggerNodeId',
name: 'TriggerNode',
type: EVALUATION_DATASET_TRIGGER_NODE,
type: EVALUATION_TRIGGER_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {},
@@ -243,6 +289,7 @@ describe('TestRunnerService', () => {
data: {
main: [[]], // Empty list
},
error: undefined,
},
],
},
@@ -271,7 +318,7 @@ describe('TestRunnerService', () => {
{
id: 'triggerNodeId',
name: "When clicking 'Execute workflow'",
type: EVALUATION_DATASET_TRIGGER_NODE,
type: EVALUATION_TRIGGER_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {},
@@ -297,6 +344,7 @@ describe('TestRunnerService', () => {
data: {
main: [expectedItems],
},
error: undefined,
},
],
},
@@ -374,7 +422,7 @@ describe('TestRunnerService', () => {
{
id: 'node1',
name: triggerNodeName,
type: EVALUATION_DATASET_TRIGGER_NODE,
type: EVALUATION_TRIGGER_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {},
@@ -427,7 +475,7 @@ describe('TestRunnerService', () => {
{
id: 'node1',
name: triggerNodeName,
type: EVALUATION_DATASET_TRIGGER_NODE,
type: EVALUATION_TRIGGER_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {},
@@ -531,7 +579,7 @@ describe('TestRunnerService', () => {
{
id: 'node1',
name: triggerNodeName,
type: EVALUATION_DATASET_TRIGGER_NODE,
type: EVALUATION_TRIGGER_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {},
@@ -586,7 +634,7 @@ describe('TestRunnerService', () => {
{
id: 'node1',
name: triggerNodeName,
type: EVALUATION_DATASET_TRIGGER_NODE,
type: EVALUATION_TRIGGER_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {},
@@ -632,4 +680,554 @@ describe('TestRunnerService', () => {
}
});
});
describe('validateSetMetricsNodes', () => {
it('should pass when metrics nodes are properly configured', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Metrics',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setMetrics',
metrics: {
assignments: [
{
id: '1',
name: 'accuracy',
value: 0.95,
},
{
id: '2',
name: 'precision',
value: 0.87,
},
],
},
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetMetricsNodes(workflow);
}).not.toThrow();
});
it('should throw SET_METRICS_NODE_NOT_FOUND when no metrics nodes exist', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Regular Node',
type: 'n8n-nodes-base.noOp',
typeVersion: 1,
position: [0, 0],
parameters: {},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetMetricsNodes(workflow);
}).toThrow(TestRunError);
try {
(testRunnerService as any).validateSetMetricsNodes(workflow);
} catch (error) {
expect(error).toBeInstanceOf(TestRunError);
expect(error.code).toBe('SET_METRICS_NODE_NOT_FOUND');
}
});
it('should throw SET_METRICS_NODE_NOT_CONFIGURED when metrics node has no parameters', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Metrics',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setMetrics',
metrics: undefined,
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetMetricsNodes(workflow);
}).toThrow(TestRunError);
try {
(testRunnerService as any).validateSetMetricsNodes(workflow);
} catch (error) {
expect(error).toBeInstanceOf(TestRunError);
expect(error.code).toBe('SET_METRICS_NODE_NOT_CONFIGURED');
expect(error.extra).toEqual({ node_name: 'Set Metrics' });
}
});
it('should throw SET_METRICS_NODE_NOT_CONFIGURED when metrics node has empty assignments', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Metrics',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setMetrics',
metrics: {
assignments: [],
},
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetMetricsNodes(workflow);
}).toThrow(TestRunError);
try {
(testRunnerService as any).validateSetMetricsNodes(workflow);
} catch (error) {
expect(error).toBeInstanceOf(TestRunError);
expect(error.code).toBe('SET_METRICS_NODE_NOT_CONFIGURED');
expect(error.extra).toEqual({ node_name: 'Set Metrics' });
}
});
it('should throw SET_METRICS_NODE_NOT_CONFIGURED when assignment has no name', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Metrics',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setMetrics',
metrics: {
assignments: [
{
id: '1',
name: '',
value: 0.95,
},
],
},
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetMetricsNodes(workflow);
}).toThrow(TestRunError);
try {
(testRunnerService as any).validateSetMetricsNodes(workflow);
} catch (error) {
expect(error).toBeInstanceOf(TestRunError);
expect(error.code).toBe('SET_METRICS_NODE_NOT_CONFIGURED');
expect(error.extra).toEqual({ node_name: 'Set Metrics' });
}
});
it('should throw SET_METRICS_NODE_NOT_CONFIGURED when assignment has null value', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Metrics',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setMetrics',
metrics: {
assignments: [
{
id: '1',
name: 'accuracy',
value: null,
},
],
},
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetMetricsNodes(workflow);
}).toThrow(TestRunError);
try {
(testRunnerService as any).validateSetMetricsNodes(workflow);
} catch (error) {
expect(error).toBeInstanceOf(TestRunError);
expect(error.code).toBe('SET_METRICS_NODE_NOT_CONFIGURED');
expect(error.extra).toEqual({ node_name: 'Set Metrics' });
}
});
it('should validate multiple metrics nodes successfully', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Metrics 1',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setMetrics',
metrics: {
assignments: [
{
id: '1',
name: 'accuracy',
value: 0.95,
},
],
},
},
},
{
id: 'node2',
name: 'Set Metrics 2',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [100, 0],
parameters: {
operation: 'setMetrics',
metrics: {
assignments: [
{
id: '2',
name: 'precision',
value: 0.87,
},
],
},
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetMetricsNodes(workflow);
}).not.toThrow();
});
});
describe('validateSetOutputsNodes', () => {
it('should pass when outputs nodes are properly configured', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Outputs',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setOutputs',
outputs: {
assignments: [
{
id: '1',
name: 'result',
value: 'success',
},
{
id: '2',
name: 'score',
value: 95,
},
],
},
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetOutputsNodes(workflow);
}).not.toThrow();
});
it('should pass when operation is default (undefined)', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Outputs',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: undefined,
outputs: {
assignments: [
{
id: '1',
name: 'result',
value: 'success',
},
{
id: '2',
name: 'score',
value: 95,
},
],
},
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetOutputsNodes(workflow);
}).not.toThrow();
});
it('should throw SET_OUTPUTS_NODE_NOT_FOUND when no outputs nodes exist', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Regular Node',
type: 'n8n-nodes-base.noOp',
typeVersion: 1,
position: [0, 0],
parameters: {},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetOutputsNodes(workflow);
}).toThrow(TestRunError);
try {
(testRunnerService as any).validateSetOutputsNodes(workflow);
} catch (error) {
expect(error).toBeInstanceOf(TestRunError);
expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_FOUND');
}
});
it('should throw SET_OUTPUTS_NODE_NOT_CONFIGURED when outputs node has no parameters', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Outputs',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setOutputs',
outputs: undefined,
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetOutputsNodes(workflow);
}).toThrow(TestRunError);
try {
(testRunnerService as any).validateSetOutputsNodes(workflow);
} catch (error) {
expect(error).toBeInstanceOf(TestRunError);
expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_CONFIGURED');
expect(error.extra).toEqual({ node_name: 'Set Outputs' });
}
});
it('should throw SET_OUTPUTS_NODE_NOT_CONFIGURED when outputs node has empty assignments', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Outputs',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setOutputs',
outputs: {
assignments: [],
},
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetOutputsNodes(workflow);
}).toThrow(TestRunError);
try {
(testRunnerService as any).validateSetOutputsNodes(workflow);
} catch (error) {
expect(error).toBeInstanceOf(TestRunError);
expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_CONFIGURED');
expect(error.extra).toEqual({ node_name: 'Set Outputs' });
}
});
it('should throw SET_OUTPUTS_NODE_NOT_CONFIGURED when assignment has no name', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Outputs',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setOutputs',
outputs: {
assignments: [
{
id: '1',
name: '',
value: 'result',
},
],
},
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetOutputsNodes(workflow);
}).toThrow(TestRunError);
try {
(testRunnerService as any).validateSetOutputsNodes(workflow);
} catch (error) {
expect(error).toBeInstanceOf(TestRunError);
expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_CONFIGURED');
expect(error.extra).toEqual({ node_name: 'Set Outputs' });
}
});
it('should throw SET_OUTPUTS_NODE_NOT_CONFIGURED when assignment has null value', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Outputs',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setOutputs',
outputs: {
assignments: [
{
id: '1',
name: 'result',
value: null,
},
],
},
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetOutputsNodes(workflow);
}).toThrow(TestRunError);
try {
(testRunnerService as any).validateSetOutputsNodes(workflow);
} catch (error) {
expect(error).toBeInstanceOf(TestRunError);
expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_CONFIGURED');
expect(error.extra).toEqual({ node_name: 'Set Outputs' });
}
});
it('should validate multiple outputs nodes successfully', () => {
const workflow = mock<IWorkflowBase>({
nodes: [
{
id: 'node1',
name: 'Set Outputs 1',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [0, 0],
parameters: {
operation: 'setOutputs',
outputs: {
assignments: [
{
id: '1',
name: 'result',
value: 'success',
},
],
},
},
},
{
id: 'node2',
name: 'Set Outputs 2',
type: EVALUATION_NODE_TYPE,
typeVersion: 1,
position: [100, 0],
parameters: {
operation: 'setOutputs',
outputs: {
assignments: [
{
id: '2',
name: 'score',
value: 95,
},
],
},
},
},
],
connections: {},
});
expect(() => {
(testRunnerService as any).validateSetOutputsNodes(workflow);
}).not.toThrow();
});
});
});
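
The assertions above rely on TestRunError exposing a machine-readable code plus optional extra context. A minimal shape sketch consistent with that usage (the real class lives in @/evaluation.ee/test-runner/errors.ee; this reconstruction is an assumption):

// Sketch of the assumed error shape, inferred from the tests above.
class TestRunError extends Error {
  constructor(
    readonly code: TestRunErrorCode,
    readonly extra?: Record<string, unknown>,
  ) {
    super(code);
  }
}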

View File

@@ -2,7 +2,11 @@ import type { User, TestRun } from '@n8n/db';
import { TestCaseExecutionRepository, TestRunRepository, WorkflowRepository } from '@n8n/db';
import { Service } from '@n8n/di';
import { ErrorReporter, Logger } from 'n8n-core';
import { ExecutionCancelledError } from 'n8n-workflow';
import {
EVALUATION_NODE_TYPE,
EVALUATION_TRIGGER_NODE_TYPE,
ExecutionCancelledError,
} from 'n8n-workflow';
import type {
IDataObject,
IRun,
@@ -10,13 +14,14 @@ import type {
IWorkflowExecutionDataProcess,
IExecuteData,
INodeExecutionData,
AssignmentCollectionValue,
} from 'n8n-workflow';
import assert from 'node:assert';
import { ActiveExecutions } from '@/active-executions';
import config from '@/config';
import { EVALUATION_DATASET_TRIGGER_NODE, EVALUATION_NODE } from '@/constants';
import { TestCaseExecutionError, TestRunError } from '@/evaluation.ee/test-runner/errors.ee';
import { checkNodeParameterNotEmpty } from '@/evaluation.ee/test-runner/utils.ee';
import { Telemetry } from '@/telemetry';
import { WorkflowRunner } from '@/workflow-runner';
@@ -59,8 +64,97 @@ export class TestRunnerService {
/**
* Finds the dataset trigger node in the workflow
*/
private findTriggerNode(workflow: IWorkflowBase) {
return workflow.nodes.find((node) => node.type === EVALUATION_DATASET_TRIGGER_NODE);
private findEvaluationTriggerNode(workflow: IWorkflowBase) {
return workflow.nodes.find((node) => node.type === EVALUATION_TRIGGER_NODE_TYPE);
}
/**
* Validates the evaluation trigger node is present in the workflow
* and is configured correctly.
*/
private validateEvaluationTriggerNode(workflow: IWorkflowBase) {
const triggerNode = this.findEvaluationTriggerNode(workflow);
if (!triggerNode) {
throw new TestRunError('EVALUATION_TRIGGER_NOT_FOUND');
}
if (
!triggerNode.credentials ||
!checkNodeParameterNotEmpty(triggerNode.parameters?.documentId) ||
!checkNodeParameterNotEmpty(triggerNode.parameters?.sheetName)
) {
throw new TestRunError('EVALUATION_TRIGGER_NOT_CONFIGURED', { node_name: triggerNode.name });
}
if (triggerNode?.disabled) {
throw new TestRunError('EVALUATION_TRIGGER_DISABLED');
}
}
/**
* Checks if the Evaluation Set Metrics nodes are present in the workflow
* and are configured correctly.
*/
private validateSetMetricsNodes(workflow: IWorkflowBase) {
const metricsNodes = TestRunnerService.getEvaluationMetricsNodes(workflow);
if (metricsNodes.length === 0) {
throw new TestRunError('SET_METRICS_NODE_NOT_FOUND');
}
const unconfiguredMetricsNode = metricsNodes.find(
(node) =>
!node.parameters ||
!node.parameters.metrics ||
(node.parameters.metrics as AssignmentCollectionValue).assignments?.length === 0 ||
(node.parameters.metrics as AssignmentCollectionValue).assignments?.some(
(assignment) => !assignment.name || assignment.value === null,
),
);
if (unconfiguredMetricsNode) {
throw new TestRunError('SET_METRICS_NODE_NOT_CONFIGURED', {
node_name: unconfiguredMetricsNode.name,
});
}
}
/**
* Checks if the Evaluation Set Outputs nodes are present in the workflow
* and are configured correctly.
*/
private validateSetOutputsNodes(workflow: IWorkflowBase) {
const setOutputsNodes = TestRunnerService.getEvaluationSetOutputsNodes(workflow);
if (setOutputsNodes.length === 0) {
throw new TestRunError('SET_OUTPUTS_NODE_NOT_FOUND');
}
const unconfiguredSetOutputsNode = setOutputsNodes.find(
(node) =>
!node.parameters ||
!node.parameters.outputs ||
(node.parameters.outputs as AssignmentCollectionValue).assignments?.length === 0 ||
(node.parameters.outputs as AssignmentCollectionValue).assignments?.some(
(assignment) => !assignment.name || assignment.value === null,
),
);
if (unconfiguredSetOutputsNode) {
throw new TestRunError('SET_OUTPUTS_NODE_NOT_CONFIGURED', {
node_name: unconfiguredSetOutputsNode.name,
});
}
}
/**
* Validates workflow configuration for evaluation
* Throws appropriate TestRunError if validation fails
*/
private validateWorkflowConfiguration(workflow: IWorkflowBase): void {
this.validateEvaluationTriggerNode(workflow);
this.validateSetOutputsNodes(workflow);
this.validateSetMetricsNodes(workflow);
}
/**
@@ -83,7 +177,7 @@ export class TestRunnerService {
// Evaluation executions should run the same way as manual,
// because they need pinned data and partial execution logic
const triggerNode = this.findTriggerNode(workflow);
const triggerNode = this.findEvaluationTriggerNode(workflow);
assert(triggerNode);
const pinData = {
@@ -148,7 +242,7 @@ export class TestRunnerService {
// Evaluation executions should run the same way as manual,
// because they need pinned data and partial execution logic
const triggerNode = this.findTriggerNode(workflow);
const triggerNode = this.findEvaluationTriggerNode(workflow);
if (!triggerNode) {
throw new TestRunError('EVALUATION_TRIGGER_NOT_FOUND');
@@ -219,11 +313,22 @@ export class TestRunnerService {
}
/**
* Get the evaluation metrics nodes from a workflow.
* Get the evaluation set metrics nodes from a workflow.
*/
static getEvaluationMetricsNodes(workflow: IWorkflowBase) {
return workflow.nodes.filter(
(node) => node.type === EVALUATION_NODE && node.parameters.operation === 'setMetrics',
(node) => node.type === EVALUATION_NODE_TYPE && node.parameters.operation === 'setMetrics',
);
}
/**
* Get the evaluation set outputs nodes from a workflow.
*/
static getEvaluationSetOutputsNodes(workflow: IWorkflowBase) {
return workflow.nodes.filter(
(node) =>
node.type === EVALUATION_NODE_TYPE &&
(node.parameters.operation === 'setOutputs' || node.parameters.operation === undefined),
);
}
@@ -231,10 +336,17 @@ export class TestRunnerService {
* Extract the dataset trigger output
*/
private extractDatasetTriggerOutput(execution: IRun, workflow: IWorkflowBase) {
const triggerNode = this.findTriggerNode(workflow);
const triggerNode = this.findEvaluationTriggerNode(workflow);
assert(triggerNode);
const triggerOutputData = execution.data.resultData.runData[triggerNode.name][0];
if (triggerOutputData?.error) {
throw new TestRunError('CANT_FETCH_TEST_CASES', {
message: triggerOutputData.error.message,
});
}
const triggerOutput = triggerOutputData?.data?.main?.[0];
if (!triggerOutput || triggerOutput.length === 0) {
@@ -248,16 +360,16 @@ export class TestRunnerService {
* Evaluation result is collected from all Evaluation Metrics nodes
*/
private extractEvaluationResult(execution: IRun, workflow: IWorkflowBase): IDataObject {
// TODO: Do not fail if not all metric nodes were executed
const metricsNodes = TestRunnerService.getEvaluationMetricsNodes(workflow);
const metricsRunData = metricsNodes.flatMap(
(node) => execution.data.resultData.runData[node.name],
);
// If a metrics node did not execute, ignore it.
const metricsRunData = metricsNodes
.flatMap((node) => execution.data.resultData.runData[node.name])
.filter((data) => data !== undefined);
const metricsData = metricsRunData
.reverse()
.map((data) => data.data?.main?.[0]?.[0]?.json ?? {});
const metricsResult = metricsData.reduce((acc, curr) => ({ ...acc, ...curr }), {});
return metricsResult;
}
@@ -294,6 +406,9 @@ export class TestRunnerService {
// Update test run status
await this.testRunRepository.markAsRunning(testRun.id);
// Check if the workflow is ready for evaluation
this.validateWorkflowConfiguration(workflow);
this.telemetry.track('User ran test', {
user_id: user.id,
run_id: testRun.id,
@@ -377,19 +492,31 @@ export class TestRunnerService {
this.extractEvaluationResult(testCaseExecution, workflow),
);
this.logger.debug('Test case metrics extracted', addedMetrics);
// Create a new test case execution in DB
await this.testCaseExecutionRepository.createTestCaseExecution({
executionId: testCaseExecutionId,
testRun: {
id: testRun.id,
},
runAt,
completedAt,
status: 'success',
metrics: addedMetrics,
});
if (Object.keys(addedMetrics).length === 0) {
await this.testCaseExecutionRepository.createTestCaseExecution({
executionId: testCaseExecutionId,
testRun: {
id: testRun.id,
},
runAt,
completedAt,
status: 'error',
errorCode: 'NO_METRICS_COLLECTED',
});
} else {
this.logger.debug('Test case metrics extracted', addedMetrics);
// Create a new test case execution in DB
await this.testCaseExecutionRepository.createTestCaseExecution({
executionId: testCaseExecutionId,
testRun: {
id: testRun.id,
},
runAt,
completedAt,
status: 'success',
metrics: addedMetrics,
});
}
} catch (e) {
const completedAt = new Date();
// FIXME: this is a temporary log
@@ -500,7 +627,7 @@ export class TestRunnerService {
} else {
const { manager: dbManager } = this.testRunRepository;
// If there is no abort controller - just mark the test run and all its' pending test case executions as cancelled
// If there is no abort controller - just mark the test run and all its pending test case executions as cancelled
await dbManager.transaction(async (trx) => {
await this.testRunRepository.markAsCancelled(testRunId, trx);
await this.testCaseExecutionRepository.markAllPendingAsCancelled(testRunId, trx);

View File

@@ -0,0 +1,19 @@
import type { NodeParameterValueType, INodeParameterResourceLocator } from 'n8n-workflow';
function isRlcValue(value: NodeParameterValueType): value is INodeParameterResourceLocator {
return Boolean(
typeof value === 'object' && value && 'value' in value && '__rl' in value && value.__rl,
);
}
export function checkNodeParameterNotEmpty(value: NodeParameterValueType) {
if (value === undefined || value === null || value === '') {
return false;
}
if (isRlcValue(value)) {
return checkNodeParameterNotEmpty(value.value);
}
return true;
}
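
The helper recurses into resource-locator (RLC) values, so a parameter whose picker is set but whose inner value is empty still counts as unconfigured. A few illustrative calls (values are made up):

checkNodeParameterNotEmpty('doc123');                                    // true
checkNodeParameterNotEmpty('');                                          // false
checkNodeParameterNotEmpty({ __rl: true, mode: 'id', value: 'doc123' }); // true
checkNodeParameterNotEmpty({ __rl: true, mode: 'list', value: '' });     // false: empty inner value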

View File

@@ -200,6 +200,10 @@ export interface ILicenseReadResponse {
value: number;
warningThreshold: number;
};
workflowsHavingEvaluations: {
limit: number;
value: number;
};
};
license: {
planId: string;

View File

@@ -1,3 +1,4 @@
import type { LicenseState } from '@n8n/backend-common';
import type { WorkflowRepository } from '@n8n/db';
import type { TEntitlement } from '@n8n_io/license-sdk';
import axios, { AxiosError } from 'axios';
@@ -12,12 +13,14 @@ jest.mock('axios');
describe('LicenseService', () => {
const license = mock<License>();
const licenseState = mock<LicenseState>();
const workflowRepository = mock<WorkflowRepository>();
const entitlement = mock<TEntitlement>({ productId: '123' });
const eventService = mock<EventService>();
const licenseService = new LicenseService(
mock(),
license,
licenseState,
workflowRepository,
mock(),
eventService,
@@ -26,7 +29,9 @@ describe('LicenseService', () => {
license.getMainPlan.mockReturnValue(entitlement);
license.getTriggerLimit.mockReturnValue(400);
license.getPlanName.mockReturnValue('Test Plan');
licenseState.getMaxWorkflowsWithEvaluations.mockReturnValue(2);
workflowRepository.getActiveTriggerCount.mockResolvedValue(7);
workflowRepository.getWorkflowsWithEvaluationCount.mockResolvedValue(1);
beforeEach(() => jest.clearAllMocks());
@@ -46,6 +51,10 @@ describe('LicenseService', () => {
value: 7,
warningThreshold: 0.8,
},
workflowsHavingEvaluations: {
limit: 2,
value: 1,
},
},
license: {
planId: '123',

View File

@@ -1,3 +1,4 @@
import { LicenseState } from '@n8n/backend-common';
import type { User } from '@n8n/db';
import { WorkflowRepository } from '@n8n/db';
import { Service } from '@n8n/di';
@@ -26,6 +27,7 @@ export class LicenseService {
constructor(
private readonly logger: Logger,
private readonly license: License,
private readonly licenseState: LicenseState,
private readonly workflowRepository: WorkflowRepository,
private readonly urlService: UrlService,
private readonly eventService: EventService,
@@ -33,6 +35,8 @@ export class LicenseService {
async getLicenseData() {
const triggerCount = await this.workflowRepository.getActiveTriggerCount();
const workflowsWithEvaluationsCount =
await this.workflowRepository.getWorkflowsWithEvaluationCount();
const mainPlan = this.license.getMainPlan();
return {
@@ -42,6 +46,10 @@ export class LicenseService {
limit: this.license.getTriggerLimit(),
warningThreshold: 0.8,
},
workflowsHavingEvaluations: {
value: workflowsWithEvaluationsCount,
limit: this.licenseState.getMaxWorkflowsWithEvaluations(),
},
},
license: {
planId: mainPlan?.productId ?? '',

View File

@@ -37,7 +37,11 @@ describe('LicenseMetricsService', () => {
describe('collectUsageMetrics', () => {
test('should return an array of expected usage metrics', async () => {
const mockActiveTriggerCount = 1234;
const mockWorkflowsWithEvaluationsCount = 5;
workflowRepository.getActiveTriggerCount.mockResolvedValue(mockActiveTriggerCount);
workflowRepository.getWorkflowsWithEvaluationCount.mockResolvedValue(
mockWorkflowsWithEvaluationsCount,
);
const mockRenewalMetrics = {
activeWorkflows: 100,
@@ -48,6 +52,7 @@ describe('LicenseMetricsService', () => {
productionExecutions: 600,
productionRootExecutions: 550,
manualExecutions: 700,
evaluations: 5,
};
licenseMetricsRespository.getLicenseRenewalMetrics.mockResolvedValue(mockRenewalMetrics);
@@ -67,6 +72,7 @@ describe('LicenseMetricsService', () => {
},
{ name: 'manualExecutions', value: mockRenewalMetrics.manualExecutions },
{ name: 'activeWorkflowTriggers', value: mockActiveTriggerCount },
{ name: 'evaluations', value: mockRenewalMetrics.evaluations },
]);
});
});

View File

@@ -20,7 +20,10 @@ export class LicenseMetricsService {
manualExecutions,
} = await this.licenseMetricsRepository.getLicenseRenewalMetrics();
const activeTriggerCount = await this.workflowRepository.getActiveTriggerCount();
const [activeTriggerCount, workflowsWithEvaluationsCount] = await Promise.all([
this.workflowRepository.getActiveTriggerCount(),
this.workflowRepository.getWorkflowsWithEvaluationCount(),
]);
return [
{ name: 'activeWorkflows', value: activeWorkflows },
@@ -32,6 +35,7 @@ export class LicenseMetricsService {
{ name: 'productionRootExecutions', value: productionRootExecutions },
{ name: 'manualExecutions', value: manualExecutions },
{ name: 'activeWorkflowTriggers', value: activeTriggerCount },
{ name: 'evaluations', value: workflowsWithEvaluationsCount },
];
}

View File

@@ -256,6 +256,9 @@ export class FrontendService {
logsView: {
enabled: false,
},
evaluation: {
quota: this.licenseState.getMaxWorkflowsWithEvaluations(),
},
};
}
@@ -395,6 +398,9 @@ export class FrontendService {
this.settings.logsView.enabled = config.get('logs_view.enabled');
// Refresh evaluation settings
this.settings.evaluation.quota = this.licenseState.getMaxWorkflowsWithEvaluations();
return this.settings;
}

View File

@@ -1,6 +1,7 @@
import { WorkflowRepository } from '@n8n/db';
import { Container } from '@n8n/di';
import { createTestRun } from '../../shared/db/evaluation';
import {
createWorkflowWithTrigger,
createWorkflow,
@@ -115,4 +116,73 @@ describe('WorkflowRepository', () => {
expect(activeIds).toHaveLength(1);
});
});
describe('getWorkflowsWithEvaluationCount', () => {
it('should return 0 when no workflows have test runs', async () => {
//
// ARRANGE
//
const workflowRepository = Container.get(WorkflowRepository);
await createWorkflow();
await createWorkflow();
//
// ACT
//
const count = await workflowRepository.getWorkflowsWithEvaluationCount();
//
// ASSERT
//
expect(count).toBe(0);
});
it('should return correct count when some workflows have test runs', async () => {
//
// ARRANGE
//
const workflowRepository = Container.get(WorkflowRepository);
const workflow1 = await createWorkflow();
await createWorkflow();
const workflow3 = await createWorkflow();
await createTestRun(workflow1.id);
await createTestRun(workflow3.id);
//
// ACT
//
const count = await workflowRepository.getWorkflowsWithEvaluationCount();
//
// ASSERT
//
expect(count).toBe(2);
});
it('should count each workflow only once even with multiple test runs', async () => {
//
// ARRANGE
//
const workflowRepository = Container.get(WorkflowRepository);
const workflow1 = await createWorkflow();
const workflow2 = await createWorkflow();
await createTestRun(workflow1.id);
await createTestRun(workflow1.id);
await createTestRun(workflow1.id);
await createTestRun(workflow2.id);
await createTestRun(workflow2.id);
//
// ACT
//
const count = await workflowRepository.getWorkflowsWithEvaluationCount();
//
// ASSERT
//
expect(count).toBe(2);
});
});
});

View File

@@ -83,6 +83,7 @@ describe('LicenseMetricsRepository', () => {
productionExecutions: 3,
productionRootExecutions: 3,
manualExecutions: 2,
evaluations: 0,
});
});
@@ -100,6 +101,7 @@ describe('LicenseMetricsRepository', () => {
productionExecutions: 0, // not NaN
productionRootExecutions: 0, // not NaN
manualExecutions: 0, // not NaN
evaluations: 0,
});
});
});

View File

@@ -119,6 +119,10 @@ const DEFAULT_LICENSE_RESPONSE: { data: ILicenseReadResponse } = {
limit: -1,
warningThreshold: 0.8,
},
workflowsHavingEvaluations: {
value: 0,
limit: 0,
},
},
license: {
planId: '',
@@ -135,6 +139,10 @@ const DEFAULT_POST_RESPONSE: { data: ILicensePostResponse } = {
limit: -1,
warningThreshold: 0.8,
},
workflowsHavingEvaluations: {
value: 0,
limit: 0,
},
},
license: {
planId: '',

View File

@@ -27,7 +27,7 @@ export const STORES = {
BECOME_TEMPLATE_CREATOR: 'becomeTemplateCreator',
PROJECTS: 'projects',
API_KEYS: 'apiKeys',
TEST_DEFINITION: 'testDefinition',
EVALUATION: 'evaluation',
FOLDERS: 'folders',
MODULES: 'modules',
} as const;

View File

@@ -1,8 +1,9 @@
import { setActivePinia, createPinia } from 'pinia';
import { useAgentRequestStore } from './useAgentRequestStore';
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { nextTick } from 'vue';
import { useAgentRequestStore } from './useAgentRequestStore';
// Mock localStorage
const localStorageMock = {
getItem: vi.fn(),
@@ -104,7 +105,7 @@ describe('parameterOverrides.store', () => {
store.addAgentRequest('workflow-1', 'node-1', 'param1', 'value1');
expect(store.agentRequests['workflow-1']['node-1']['param1']).toBe('value1');
expect(store.agentRequests['workflow-1']['node-1'].param1).toBe('value1');
});
it('adds multiple parameter overrides', () => {

View File

@@ -55,6 +55,7 @@ import type {
AI_OTHERS_NODE_CREATOR_VIEW,
ROLE,
AI_UNCATEGORIZED_CATEGORY,
AI_EVALUATION,
} from '@/constants';
import type { BulkCommand, Undoable } from '@/models/history';
@@ -1079,7 +1080,8 @@ export type NodeFilterType =
| typeof TRIGGER_NODE_CREATOR_VIEW
| typeof AI_NODE_CREATOR_VIEW
| typeof AI_OTHERS_NODE_CREATOR_VIEW
| typeof AI_UNCATEGORIZED_CATEGORY;
| typeof AI_UNCATEGORIZED_CATEGORY
| typeof AI_EVALUATION;
export type NodeCreatorOpenSource =
| ''
@@ -1092,7 +1094,9 @@ export type NodeCreatorOpenSource =
| 'node_connection_action'
| 'node_connection_drop'
| 'notice_error_message'
| 'add_node_button';
| 'add_node_button'
| 'add_evaluation_trigger_button'
| 'add_evaluation_node_button';
export interface INodeCreatorState {
itemsFilter: string;
@@ -1318,6 +1322,10 @@ export type UsageState = {
value: number;
warningThreshold: number; // hardcoded value in BE
};
workflowsHavingEvaluations: {
limit: number; // -1 for unlimited, from license
value: number;
};
};
license: {
planId: string; // community
@@ -1466,7 +1474,8 @@ export type CloudUpdateLinkSourceType =
| 'external-secrets'
| 'rbac'
| 'debug'
| 'insights';
| 'insights'
| 'evaluations';
export type UTMCampaign =
| 'upgrade-custom-data-filter'
@@ -1490,7 +1499,8 @@ export type UTMCampaign =
| 'upgrade-external-secrets'
| 'upgrade-rbac'
| 'upgrade-debug'
| 'upgrade-insights';
| 'upgrade-insights'
| 'upgrade-evaluations';
export type N8nBanners = {
[key in BannerName]: {

View File

@@ -160,4 +160,7 @@ export const defaultSettings: FrontendSettings = {
logsView: {
enabled: false,
},
evaluation: {
quota: 0,
},
};

View File

@@ -0,0 +1,108 @@
import type { IRestApiContext } from '@/Interface';
import { makeRestApiRequest, request } from '@/utils/apiUtils';
export interface TestRunRecord {
id: string;
workflowId: string;
status: 'new' | 'running' | 'completed' | 'error' | 'cancelled' | 'warning' | 'success';
metrics?: Record<string, number>;
createdAt: string;
updatedAt: string;
runAt: string;
completedAt: string;
errorCode?: string;
errorDetails?: Record<string, unknown>;
finalResult?: 'success' | 'error' | 'warning';
}
interface GetTestRunParams {
workflowId: string;
runId: string;
}
interface DeleteTestRunParams {
workflowId: string;
runId: string;
}
export interface TestCaseExecutionRecord {
id: string;
testRunId: string;
executionId: string;
status: 'running' | 'completed' | 'error';
createdAt: string;
updatedAt: string;
runAt: string;
metrics?: Record<string, number>;
errorCode?: string;
errorDetails?: Record<string, unknown>;
}
const getTestRunsEndpoint = (workflowId: string, runId?: string) =>
`/workflows/${workflowId}/test-runs${runId ? `/${runId}` : ''}`;
// Get all test runs for a workflow
export const getTestRuns = async (context: IRestApiContext, workflowId: string) => {
return await makeRestApiRequest<TestRunRecord[]>(context, 'GET', getTestRunsEndpoint(workflowId));
};
// Get specific test run
export const getTestRun = async (context: IRestApiContext, params: GetTestRunParams) => {
return await makeRestApiRequest<TestRunRecord>(
context,
'GET',
getTestRunsEndpoint(params.workflowId, params.runId),
);
};
// Start a new test run
export const startTestRun = async (context: IRestApiContext, workflowId: string) => {
const response = await request({
method: 'POST',
baseURL: context.baseUrl,
endpoint: `/workflows/${workflowId}/test-runs/new`,
headers: { 'push-ref': context.pushRef },
});
// The CLI returns the response without wrapping it in a `data` key
return response as { success: boolean };
};
export const cancelTestRun = async (
context: IRestApiContext,
workflowId: string,
testRunId: string,
) => {
const response = await request({
method: 'POST',
baseURL: context.baseUrl,
endpoint: `/workflows/${workflowId}/test-runs/${testRunId}/cancel`,
headers: { 'push-ref': context.pushRef },
});
// The CLI returns the response without wrapping it in a `data` key
return response as { success: boolean };
};
// Delete a test run
export const deleteTestRun = async (context: IRestApiContext, params: DeleteTestRunParams) => {
return await makeRestApiRequest<{ success: boolean }>(
context,
'DELETE',
getTestRunsEndpoint(params.workflowId, params.runId),
);
};
const getRunExecutionsEndpoint = (workflowId: string, runId: string) =>
`/workflows/${workflowId}/test-runs/${runId}/test-cases`;
// Get all test cases of a test run
export const getTestCaseExecutions = async (
context: IRestApiContext,
workflowId: string,
runId: string,
) => {
return await makeRestApiRequest<TestCaseExecutionRecord[]>(
context,
'GET',
getRunExecutionsEndpoint(workflowId, runId),
);
};
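
A minimal consumption sketch for the new endpoints, assuming a rest API context obtained from the root store (everything outside this file is an assumption):

import type { IRestApiContext } from '@/Interface';
import { getTestRuns, startTestRun } from '@/api/evaluation.ee';

// Start a run for a workflow, then list its runs once the start is accepted.
async function runAndList(context: IRestApiContext, workflowId: string) {
  const { success } = await startTestRun(context, workflowId);
  return success ? await getTestRuns(context, workflowId) : [];
}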

View File

@@ -1,210 +0,0 @@
import type { IRestApiContext } from '@/Interface';
import { makeRestApiRequest, request } from '@/utils/apiUtils';
export interface TestDefinitionRecord {
id: string;
name: string;
workflowId: string;
evaluationWorkflowId?: string | null;
annotationTagId?: string | null;
description?: string | null;
updatedAt?: string;
createdAt: string;
annotationTag?: string | null;
mockedNodes?: Array<{ name: string; id: string }>;
}
interface CreateTestDefinitionParams {
name: string;
workflowId: string;
evaluationWorkflowId?: string | null;
}
export interface UpdateTestDefinitionParams {
name?: string;
evaluationWorkflowId?: string | null;
annotationTagId?: string | null;
description?: string | null;
mockedNodes?: Array<{ name: string; id: string }>;
}
export interface UpdateTestResponse {
createdAt: string;
updatedAt: string;
id: string;
name: string;
workflowId: string;
description?: string | null;
annotationTag?: string | null;
evaluationWorkflowId?: string | null;
annotationTagId?: string | null;
}
export interface TestRunRecord {
id: string;
testDefinitionId: string;
status: 'new' | 'running' | 'completed' | 'error' | 'cancelled' | 'warning' | 'success';
metrics?: Record<string, number>;
createdAt: string;
updatedAt: string;
runAt: string;
completedAt: string;
errorCode?: string;
errorDetails?: Record<string, unknown>;
finalResult?: 'success' | 'error' | 'warning';
}
interface GetTestRunParams {
testDefinitionId: string;
runId: string;
}
interface DeleteTestRunParams {
testDefinitionId: string;
runId: string;
}
export interface TestCaseExecutionRecord {
id: string;
testRunId: string;
executionId: string;
pastExecutionId: string;
evaluationExecutionId: string;
status: 'running' | 'completed' | 'error';
createdAt: string;
updatedAt: string;
runAt: string;
metrics?: Record<string, number>;
errorCode?: string;
errorDetails?: Record<string, unknown>;
}
const endpoint = '/evaluation/test-definitions';
export async function getTestDefinitions(
context: IRestApiContext,
params?: { workflowId?: string },
) {
let url = endpoint;
if (params?.workflowId) {
url += `?filter=${JSON.stringify({ workflowId: params.workflowId })}`;
}
return await makeRestApiRequest<{ count: number; testDefinitions: TestDefinitionRecord[] }>(
context,
'GET',
url,
);
}
export async function getTestDefinition(context: IRestApiContext, id: string) {
return await makeRestApiRequest<TestDefinitionRecord>(context, 'GET', `${endpoint}/${id}`);
}
export async function createTestDefinition(
context: IRestApiContext,
params: CreateTestDefinitionParams,
) {
return await makeRestApiRequest<TestDefinitionRecord>(context, 'POST', endpoint, params);
}
export async function updateTestDefinition(
context: IRestApiContext,
id: string,
params: UpdateTestDefinitionParams,
) {
return await makeRestApiRequest<UpdateTestResponse>(
context,
'PATCH',
`${endpoint}/${id}`,
params,
);
}
export async function deleteTestDefinition(context: IRestApiContext, id: string) {
return await makeRestApiRequest<{ success: boolean }>(context, 'DELETE', `${endpoint}/${id}`);
}
export async function getExampleEvaluationInput(
context: IRestApiContext,
testDefinitionId: string,
annotationTagId: string,
) {
return await makeRestApiRequest<Record<string, unknown> | null>(
context,
'GET',
`${endpoint}/${testDefinitionId}/example-evaluation-input?annotationTagId=${annotationTagId}`,
);
}
const getRunsEndpoint = (testDefinitionId: string, runId?: string) =>
`${endpoint}/${testDefinitionId}/runs${runId ? `/${runId}` : ''}`;
// Get all test runs for a test definition
export const getTestRuns = async (context: IRestApiContext, testDefinitionId: string) => {
return await makeRestApiRequest<TestRunRecord[]>(
context,
'GET',
getRunsEndpoint(testDefinitionId),
);
};
// Get specific test run
export const getTestRun = async (context: IRestApiContext, params: GetTestRunParams) => {
return await makeRestApiRequest<TestRunRecord>(
context,
'GET',
getRunsEndpoint(params.testDefinitionId, params.runId),
);
};
// Start a new test run
export const startTestRun = async (context: IRestApiContext, testDefinitionId: string) => {
const response = await request({
method: 'POST',
baseURL: context.baseUrl,
endpoint: `${endpoint}/${testDefinitionId}/run`,
headers: { 'push-ref': context.pushRef },
});
// CLI is returning the response without wrapping it in `data` key
return response as { success: boolean };
};
export const cancelTestRun = async (
context: IRestApiContext,
testDefinitionId: string,
testRunId: string,
) => {
const response = await request({
method: 'POST',
baseURL: context.baseUrl,
endpoint: `${endpoint}/${testDefinitionId}/runs/${testRunId}/cancel`,
headers: { 'push-ref': context.pushRef },
});
// CLI is returning the response without wrapping it in `data` key
return response as { success: boolean };
};
// Delete a test run
export const deleteTestRun = async (context: IRestApiContext, params: DeleteTestRunParams) => {
return await makeRestApiRequest<{ success: boolean }>(
context,
'DELETE',
getRunsEndpoint(params.testDefinitionId, params.runId),
);
};
const getRunExecutionsEndpoint = (testDefinitionId: string, runId: string) =>
`${endpoint}/${testDefinitionId}/runs/${runId}/cases`;
// Get all test cases of a test run
export const getTestCaseExecutions = async (
context: IRestApiContext,
testDefinitionId: string,
runId: string,
) => {
return await makeRestApiRequest<TestCaseExecutionRecord[]>(
context,
'GET',
getRunExecutionsEndpoint(testDefinitionId, runId),
);
};

View File

@@ -1,5 +1,5 @@
<script setup lang="ts">
import type { TestRunRecord } from '@/api/testDefinition.ee';
import type { TestRunRecord } from '@/api/evaluation.ee';
import { computed, watchEffect } from 'vue';
import { Line } from 'vue-chartjs';
import { useMetricsChart } from '../composables/useMetricsChart';

View File

@@ -1,7 +1,7 @@
<script setup lang="ts">
import type { TestRunRecord } from '@/api/testDefinition.ee';
import MetricsChart from '@/components/TestDefinition/ListRuns/MetricsChart.vue';
import TestRunsTable from '@/components/TestDefinition/ListRuns/TestRunsTable.vue';
import type { TestRunRecord } from '@/api/evaluation.ee';
import MetricsChart from '@/components/Evaluations.ee/ListRuns/MetricsChart.vue';
import TestRunsTable from '@/components/Evaluations.ee/ListRuns/TestRunsTable.vue';
import { useI18n } from '@/composables/useI18n';
import { VIEWS } from '@/constants';
import { convertToDisplayDate } from '@/utils/formatters/dateFormatter';
@@ -10,7 +10,7 @@ import { useRouter } from 'vue-router';
const props = defineProps<{
runs: Array<TestRunRecord & { index: number }>;
testId: string;
workflowId: string;
}>();
const locale = useI18n();
@@ -42,7 +42,7 @@ const metricColumns = computed(() =>
const columns = computed(() => [
{
prop: 'id',
label: locale.baseText('testDefinition.listRuns.runNumber'),
label: locale.baseText('evaluation.listRuns.runNumber'),
showOverflowTooltip: true,
},
{
@@ -59,7 +59,7 @@ const columns = computed(() => [
},
{
prop: 'status',
label: locale.baseText('testDefinition.listRuns.status'),
label: locale.baseText('evaluation.listRuns.status'),
sortable: true,
},
...metricColumns.value,
@@ -67,8 +67,8 @@ const columns = computed(() => [
const handleRowClick = (row: TestRunRecord) => {
void router.push({
name: VIEWS.TEST_DEFINITION_RUNS_DETAIL,
params: { testId: row.testDefinitionId, runId: row.id },
name: VIEWS.EVALUATION_RUNS_DETAIL,
params: { runId: row.id },
});
};
</script>

View File

@@ -0,0 +1,177 @@
<script setup lang="ts">
import type { TestRunRecord } from '@/api/evaluation.ee';
import { useI18n } from '@/composables/useI18n';
import { N8nIcon, N8nText } from '@n8n/design-system';
import { computed } from 'vue';
import type { TestTableColumn } from '../shared/TestTableBase.vue';
import type { BaseTextKey } from '@/plugins/i18n';
import TestTableBase from '../shared/TestTableBase.vue';
import { statusDictionary } from '../shared/statusDictionary';
import { getErrorBaseKey } from '@/components/Evaluations.ee/shared/errorCodes';
const emit = defineEmits<{
rowClick: [run: TestRunRecord & { index: number }];
}>();
const props = defineProps<{
runs: Array<TestRunRecord & { index: number }>;
columns: Array<TestTableColumn<TestRunRecord & { index: number }>>;
}>();
const locale = useI18n();
const styledColumns = computed(() => {
return props.columns.map((column) => {
if (column.prop === 'id') {
return {
...column,
width: 100,
};
}
if (column.prop === 'runAt') {
return {
...column,
width: 150,
};
}
return column;
});
});
// Combine test run statuses and finalResult to get the final status
const runSummaries = computed(() => {
return props.runs.map(({ status, finalResult, errorDetails, ...run }) => {
if (status === 'completed' && finalResult && ['error', 'warning'].includes(finalResult)) {
status = 'warning';
}
return {
...run,
status,
finalResult,
errorDetails: errorDetails as Record<string, string | number> | undefined,
};
});
});
</script>
<template>
<div :class="$style.container">
<N8nHeading size="large" :bold="true" :class="$style.runsTableHeading" color="text-base">
{{ locale.baseText('evaluation.listRuns.pastRuns.total', { adjustToNumber: runs.length }) }}
({{ runs.length }})
</N8nHeading>
<TestTableBase
:data="runSummaries"
:columns="styledColumns"
:default-sort="{ prop: 'runAt', order: 'descending' }"
@row-click="(row) => (row.status !== 'error' ? emit('rowClick', row) : undefined)"
>
<template #id="{ row }">#{{ row.index }} </template>
<template #status="{ row }">
<div
style="display: inline-flex; gap: 12px; text-transform: capitalize; align-items: center"
>
<N8nText v-if="row.status === 'running'" color="secondary">
<AnimatedSpinner />
</N8nText>
<N8nIcon
v-else
:icon="statusDictionary[row.status].icon"
:color="statusDictionary[row.status].color"
/>
<template v-if="row.status === 'warning'">
<N8nText color="warning" :class="[$style.alertText, $style.warningText]">
{{ locale.baseText(`evaluation.runDetail.error.partialCasesFailed`) }}
</N8nText>
</template>
<template v-else-if="row.status === 'error'">
<N8nTooltip placement="top" :show-after="300">
<template #content>
<i18n-t :keypath="`${getErrorBaseKey(row.errorCode)}`">
<template
v-if="
locale.exists(`${getErrorBaseKey(row.errorCode)}.description` as BaseTextKey)
"
#description
>
{{
locale.baseText(
`${getErrorBaseKey(row.errorCode)}.description` as BaseTextKey,
) && '. '
}}
{{
locale.baseText(
`${getErrorBaseKey(row.errorCode)}.description` as BaseTextKey,
)
}}
</template>
</i18n-t>
</template>
<N8nText :class="[$style.alertText, $style.errorText]">
<i18n-t :keypath="`${getErrorBaseKey(row.errorCode)}`">
<template
v-if="
locale.exists(`${getErrorBaseKey(row.errorCode)}.description` as BaseTextKey)
"
#description
>
<p :class="$style.grayText">
{{
locale.baseText(
`${getErrorBaseKey(row.errorCode)}.description` as BaseTextKey,
)
}}
</p>
</template>
</i18n-t>
</N8nText>
</N8nTooltip>
</template>
<template v-else>
{{ row.status }}
</template>
</div>
</template>
</TestTableBase>
</div>
</template>
<style module lang="scss">
.container {
display: flex;
flex-direction: column;
gap: 8px;
}
.grayText {
color: var(--color-text-light);
}
.alertText {
display: -webkit-box;
-webkit-line-clamp: 2;
line-clamp: 2;
-webkit-box-orient: vertical;
max-width: 100%;
text-overflow: ellipsis;
overflow: hidden;
white-space: normal;
word-break: break-word;
line-height: 1.25;
text-transform: none;
}
.alertText::first-letter {
text-transform: uppercase;
}
.warningText {
color: var(--color-warning);
}
.errorText {
color: var(--color-text-danger);
}
</style>

View File

@@ -0,0 +1,27 @@
<script setup lang="ts">
import { useI18n } from '@/composables/useI18n';
import { COMMUNITY_PLUS_ENROLLMENT_MODAL } from '@/constants';
import { useUIStore } from '@/stores/ui.store';
const i18n = useI18n();
const uiStore = useUIStore();
const goToUpgrade = async () => {
uiStore.openModalWithData({
name: COMMUNITY_PLUS_ENROLLMENT_MODAL,
data: {
customHeading: undefined,
},
});
};
</script>
<template>
<n8n-action-box
data-test-id="evaluations-unlicensed"
:heading="i18n.baseText('evaluations.paywall.title')"
:description="i18n.baseText('evaluations.paywall.description')"
:button-text="i18n.baseText('evaluations.paywall.cta')"
@click="goToUpgrade"
></n8n-action-box>
</template>

View File

@@ -0,0 +1,325 @@
<script setup lang="ts">
import { useI18n } from '@/composables/useI18n';
import { N8nText, N8nButton, N8nCallout } from '@n8n/design-system';
import { ref, computed } from 'vue';
import { useWorkflowsStore } from '@/stores/workflows.store';
import { useEvaluationStore } from '@/stores/evaluation.store.ee';
import { PLACEHOLDER_EMPTY_WORKFLOW_ID, VIEWS } from '@/constants';
import StepHeader from '../shared/StepHeader.vue';
import { useRouter } from 'vue-router';
import { useUsageStore } from '@/stores/usage.store';
import { usePageRedirectionHelper } from '@/composables/usePageRedirectionHelper';
defineEmits<{
runTest: [];
}>();
const router = useRouter();
const locale = useI18n();
const workflowsStore = useWorkflowsStore();
const evaluationStore = useEvaluationStore();
const usageStore = useUsageStore();
const pageRedirectionHelper = usePageRedirectionHelper();
const hasRuns = computed(() => {
return evaluationStore.testRunsByWorkflowId[workflowsStore.workflow.id]?.length > 0;
});
const evaluationsAvailable = computed(() => {
return (
usageStore.workflowsWithEvaluationsLimit === -1 ||
usageStore.workflowsWithEvaluationsCount < usageStore.workflowsWithEvaluationsLimit
);
});
const evaluationsQuotaExceeded = computed(() => {
return (
usageStore.workflowsWithEvaluationsLimit !== -1 &&
usageStore.workflowsWithEvaluationsCount >= usageStore.workflowsWithEvaluationsLimit &&
!hasRuns.value
);
});
const activeStepIndex = ref(0);
// Calculate the initial active step based on the workflow state
const initializeActiveStep = () => {
if (evaluationsQuotaExceeded.value) {
activeStepIndex.value = 2;
return;
}
if (
evaluationStore.evaluationTriggerExists &&
evaluationStore.evaluationSetOutputsNodeExist &&
evaluationStore.evaluationSetMetricsNodeExist
) {
activeStepIndex.value = 3;
} else if (
evaluationStore.evaluationTriggerExists &&
evaluationStore.evaluationSetOutputsNodeExist
) {
activeStepIndex.value = 2;
} else if (evaluationStore.evaluationTriggerExists) {
activeStepIndex.value = 1;
} else {
activeStepIndex.value = 0;
}
};
// Run initialization once during component setup (synchronously, before mount)
initializeActiveStep();
const toggleStep = (index: number) => {
activeStepIndex.value = index;
};
function navigateToWorkflow(
action?: 'addEvaluationTrigger' | 'addEvaluationNode' | 'executeEvaluation',
) {
const routeWorkflowId =
workflowsStore.workflow.id === PLACEHOLDER_EMPTY_WORKFLOW_ID
? 'new'
: workflowsStore.workflow.id;
void router.push({
name: VIEWS.WORKFLOW,
params: { name: routeWorkflowId },
query: action ? { action } : undefined,
});
}
function onSeePlans() {
void pageRedirectionHelper.goToUpgrade('evaluations', 'upgrade-evaluations');
}
</script>
<template>
<div :class="$style.container" data-test-id="evaluation-setup-wizard">
<div :class="$style.steps">
<!-- Step 1 -->
<div :class="$style.step">
<StepHeader
:step-number="1"
:title="locale.baseText('evaluations.setupWizard.step1.title')"
:is-completed="evaluationStore.evaluationTriggerExists"
:is-active="activeStepIndex === 0"
@click="toggleStep(0)"
/>
<div v-if="activeStepIndex === 0" :class="$style.stepContent">
<ul :class="$style.bulletPoints">
<li>
<N8nText size="small" color="text-base">
{{ locale.baseText('evaluations.setupWizard.step1.item1') }}
</N8nText>
</li>
<li>
<N8nText size="small" color="text-base">
{{ locale.baseText('evaluations.setupWizard.step1.item2') }}
</N8nText>
</li>
</ul>
<div :class="$style.actionButton">
<N8nButton
size="small"
type="secondary"
@click="navigateToWorkflow('addEvaluationTrigger')"
>
{{ locale.baseText('evaluations.setupWizard.step1.button') }}
</N8nButton>
</div>
</div>
</div>
<!-- Step 2 -->
<div :class="$style.step">
<StepHeader
:step-number="2"
:title="locale.baseText('evaluations.setupWizard.step2.title')"
:is-completed="evaluationStore.evaluationSetOutputsNodeExist"
:is-active="activeStepIndex === 1"
@click="toggleStep(1)"
/>
<div v-if="activeStepIndex === 1" :class="$style.stepContent">
<ul :class="$style.bulletPoints">
<li>
<N8nText size="small" color="text-base">
{{ locale.baseText('evaluations.setupWizard.step2.item1') }}
</N8nText>
</li>
</ul>
<div :class="$style.actionButton">
<N8nButton
size="small"
type="secondary"
@click="navigateToWorkflow('addEvaluationNode')"
>
{{ locale.baseText('evaluations.setupWizard.step2.button') }}
</N8nButton>
</div>
</div>
</div>
<!-- Step 3 -->
<div :class="$style.step">
<StepHeader
:step-number="3"
:title="locale.baseText('evaluations.setupWizard.step3.title')"
:is-completed="evaluationStore.evaluationSetMetricsNodeExist"
:is-active="activeStepIndex === 2"
:is-optional="true"
@click="toggleStep(2)"
/>
<div v-if="activeStepIndex === 2" :class="$style.stepContent">
<ul v-if="!evaluationsQuotaExceeded" :class="$style.bulletPoints">
<li>
<N8nText size="small" color="text-base">
{{ locale.baseText('evaluations.setupWizard.step3.item1') }}
</N8nText>
</li>
<li>
<N8nText size="small" color="text-base">
{{ locale.baseText('evaluations.setupWizard.step3.item2') }}
</N8nText>
</li>
</ul>
<N8nCallout v-else theme="warning" iconless>
{{ locale.baseText('evaluations.setupWizard.limitReached') }}
</N8nCallout>
<div :class="$style.actionButton">
<N8nButton
v-if="!evaluationsQuotaExceeded"
size="small"
type="secondary"
@click="navigateToWorkflow('addEvaluationNode')"
>
{{ locale.baseText('evaluations.setupWizard.step3.button') }}
</N8nButton>
<N8nButton v-else size="small" @click="onSeePlans">
{{ locale.baseText('generic.seePlans') }}
</N8nButton>
<N8nButton
size="small"
text
style="color: var(--color-text-light)"
@click="toggleStep(3)"
>
{{ locale.baseText('evaluations.setupWizard.step3.skip') }}
</N8nButton>
</div>
<div
v-if="usageStore.workflowsWithEvaluationsLimit !== -1 && evaluationsAvailable"
:class="$style.quotaNote"
>
<N8nText size="xsmall" color="text-base">
<i18n-t keypath="evaluations.setupWizard.step3.notice">
<template #link>
<a style="text-decoration: underline; color: inherit" @click="onSeePlans"
>{{ locale.baseText('evaluations.setupWizard.step3.notice.link') }}
</a>
</template>
</i18n-t>
</N8nText>
</div>
</div>
</div>
<!-- Step 4 -->
<div :class="$style.step">
<StepHeader
:step-number="4"
:title="locale.baseText('evaluations.setupWizard.step4.title')"
:is-completed="false"
:is-active="activeStepIndex === 3"
@click="toggleStep(3)"
>
<div :class="[$style.actionButton, $style.actionButtonInline]">
<N8nButton
v-if="evaluationStore.evaluationSetMetricsNodeExist && !evaluationsQuotaExceeded"
size="medium"
type="secondary"
:disabled="
!evaluationStore.evaluationTriggerExists ||
!evaluationStore.evaluationSetOutputsNodeExist
"
@click="$emit('runTest')"
>
{{ locale.baseText('evaluations.setupWizard.step4.button') }}
</N8nButton>
<N8nButton
v-else
size="medium"
type="secondary"
:disabled="
!evaluationStore.evaluationTriggerExists ||
!evaluationStore.evaluationSetOutputsNodeExist
"
@click="navigateToWorkflow('executeEvaluation')"
>
{{ locale.baseText('evaluations.setupWizard.step4.altButton') }}
</N8nButton>
</div>
</StepHeader>
</div>
</div>
</div>
</template>
<style module lang="scss">
.container {
background-color: var(--color-background-light);
}
.steps {
display: flex;
flex-direction: column;
gap: var(--spacing-m);
}
.step {
overflow: hidden;
}
.stepContent {
padding: 0 0 0 calc(var(--spacing-xs) + 28px);
animation: slideDown 0.2s ease;
}
.bulletPoints {
padding-left: var(--spacing-s);
li {
margin-bottom: var(--spacing-3xs);
}
}
.actionButton {
margin-top: var(--spacing-s);
display: flex;
gap: var(--spacing-s);
button {
font-weight: var(--font-weight-bold);
}
}
.actionButtonInline {
margin: 0;
}
.quotaNote {
margin-top: var(--spacing-2xs);
}
@keyframes slideDown {
from {
opacity: 0;
transform: translateY(-10px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
</style>
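
Two rules in the script above are easy to miss: a limit of -1 means unlimited, and a reached quota only blocks workflows that have no runs yet. Step derivation then picks the first incomplete step (the quota short-circuit to the metrics step is left out here). A standalone sketch of both rules under those same assumptions (the function names are chosen for the example, not part of the commit):

// -1 is the "unlimited" sentinel used by the usage store.
function evaluationsAvailable(limit: number, used: number): boolean {
	return limit === -1 || used < limit;
}

// A reached quota is only enforced for workflows without prior runs,
// so workflows that already ran evaluations keep access.
function quotaExceeded(limit: number, used: number, hasRuns: boolean): boolean {
	return limit !== -1 && used >= limit && !hasRuns;
}

// Mirrors initializeActiveStep: the first missing node decides the step.
function initialStep(trigger: boolean, outputs: boolean, metrics: boolean): number {
	if (trigger && outputs && metrics) return 3; // everything set up: run step
	if (trigger && outputs) return 2; // metrics step
	if (trigger) return 1; // outputs step
	return 0; // trigger step
}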

View File

@@ -1,5 +1,5 @@
import type { ChartData, ChartOptions } from 'chart.js';
-import type { TestRunRecord } from '@/api/testDefinition.ee';
+import type { TestRunRecord } from '@/api/evaluation.ee';
import dateFormat from 'dateformat';
import { useCssVar } from '@vueuse/core';

View File

@@ -0,0 +1,66 @@
<script setup lang="ts">
import { N8nText, N8nBadge } from '@n8n/design-system';
import StepIndicator from './StepIndicator.vue';
import { useI18n } from '@/composables/useI18n';
defineProps<{
stepNumber: number;
title: string;
isCompleted: boolean;
isActive: boolean;
isOptional?: boolean;
}>();
const emit = defineEmits<{
click: [];
}>();
const locale = useI18n();
const handleClick = (event: Event) => {
	// Only emit the click when it doesn't originate from a nested interactive element
	if (!(event.target as HTMLElement).closest('button, a, input, select')) {
		emit('click');
	}
};
</script>
<template>
<div :class="$style.stepHeader" @click="handleClick">
<StepIndicator :step-number="stepNumber" :is-completed="isCompleted" :is-active="isActive" />
<!-- Use slot if provided, otherwise use title prop -->
<div :class="$style.titleSlot">
<slot>
<N8nText
size="medium"
:color="isActive || isCompleted ? 'text-dark' : 'text-light'"
tag="span"
bold
>
{{ title }}
</N8nText>
</slot>
</div>
<N8nBadge
v-if="isOptional"
style="background-color: var(--color-background-base); border: none"
>
{{ locale.baseText('evaluations.setupWizard.stepHeader.optional') }}
</N8nBadge>
</div>
</template>
<style module lang="scss">
.stepHeader {
display: flex;
align-items: center;
gap: var(--spacing-xs);
cursor: pointer;
position: relative;
}
</style>

View File

@@ -0,0 +1,61 @@
<script setup lang="ts">
import { N8nIcon } from '@n8n/design-system';
defineProps<{
stepNumber: number;
isCompleted: boolean;
isActive?: boolean;
}>();
</script>
<template>
<div
:class="[
$style.stepIndicator,
isCompleted && $style.completed,
isActive && $style.active,
!isActive && !isCompleted && $style.inactive,
]"
>
<template v-if="isCompleted">
<N8nIcon icon="check" size="xsmall" />
</template>
<template v-else>
{{ stepNumber }}
</template>
</div>
</template>
<style module lang="scss">
.stepIndicator {
display: flex;
align-items: center;
justify-content: center;
min-width: 28px;
height: 28px;
border-radius: 50%;
border: 1px solid var(--color-text-light);
color: var(--color-text-light);
font-weight: var(--font-weight-bold);
font-size: var(--font-size-2xs);
flex-shrink: 0;
transition: all 0.2s ease;
&.active {
border-color: var(--color-primary);
color: var(--color-text-dark);
}
&.completed {
background-color: var(--color-success);
border-color: var(--color-success);
color: var(--prim-color-white);
}
&.inactive {
color: var(--color-text-light);
border-color: var(--color-text-base);
opacity: 0.7;
}
}
</style>
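
The indicator renders a check icon once a step is completed and the step number otherwise. The class bindings above can apply both .active and .completed at once; .completed is declared later in the stylesheet, so it wins. A pure-function version of that effective precedence (a sketch; the type and name are mine):

// Completed wins over active; anything else is inactive.
type StepState = 'completed' | 'active' | 'inactive';
function stepState(isCompleted: boolean, isActive?: boolean): StepState {
	if (isCompleted) return 'completed';
	return isActive ? 'active' : 'inactive';
}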

View File

@@ -1,7 +1,7 @@
<script setup lang="ts" generic="T">
import N8nTooltip from '@n8n/design-system/components/N8nTooltip';
import type { BaseTextKey } from '@/plugins/i18n';
-import type { TestTableColumn } from '@/components/TestDefinition/shared/TestTableBase.vue';
+import type { TestTableColumn } from '@/components/Evaluations.ee/shared/TestTableBase.vue';
import { useI18n } from '@/composables/useI18n';
import { useRouter } from 'vue-router';
@@ -23,15 +23,19 @@ function hasError(row: unknown): row is WithError {
const errorTooltipMap: Record<string, BaseTextKey> = {
// Test case errors
-	MOCKED_NODE_DOES_NOT_EXIST: 'testDefinition.runDetail.error.mockedNodeMissing',
-	FAILED_TO_EXECUTE_EVALUATION_WORKFLOW: 'testDefinition.runDetail.error.evaluationFailed',
-	FAILED_TO_EXECUTE_WORKFLOW: 'testDefinition.runDetail.error.executionFailed',
-	TRIGGER_NO_LONGER_EXISTS: 'testDefinition.runDetail.error.triggerNoLongerExists',
-	INVALID_METRICS: 'testDefinition.runDetail.error.invalidMetrics',
+	MOCKED_NODE_NOT_FOUND: 'evaluation.runDetail.error.mockedNodeMissing',
+	FAILED_TO_EXECUTE_WORKFLOW: 'evaluation.runDetail.error.executionFailed',
+	INVALID_METRICS: 'evaluation.runDetail.error.invalidMetrics',
	// Test run errors
-	PAST_EXECUTIONS_NOT_FOUND: 'testDefinition.listRuns.error.noPastExecutions',
-	EVALUATION_WORKFLOW_NOT_FOUND: 'testDefinition.listRuns.error.evaluationWorkflowNotFound',
+	TEST_CASES_NOT_FOUND: 'evaluation.listRuns.error.testCasesNotFound',
+	EVALUATION_TRIGGER_NOT_FOUND: 'evaluation.listRuns.error.evaluationTriggerNotFound',
+	EVALUATION_TRIGGER_NOT_CONFIGURED: 'evaluation.listRuns.error.evaluationTriggerNotConfigured',
+	SET_OUTPUTS_NODE_NOT_FOUND: 'evaluation.listRuns.error.setOutputsNodeNotFound',
+	SET_OUTPUTS_NODE_NOT_CONFIGURED: 'evaluation.listRuns.error.setOutputsNodeNotConfigured',
+	SET_METRICS_NODE_NOT_FOUND: 'evaluation.listRuns.error.setMetricsNodeNotFound',
+	SET_METRICS_NODE_NOT_CONFIGURED: 'evaluation.listRuns.error.setMetricsNodeNotConfigured',
+	CANT_FETCH_TEST_CASES: 'evaluation.listRuns.error.cantFetchTestCases',
};
// FIXME: move status logic to a parent component
@@ -47,14 +51,14 @@ const statusThemeMap: Record<string, string> = {
};
const statusLabelMap: Record<string, string> = {
-	new: locale.baseText('testDefinition.listRuns.status.new'),
-	running: locale.baseText('testDefinition.listRuns.status.running'),
-	evaluation_running: locale.baseText('testDefinition.listRuns.status.evaluating'),
-	completed: locale.baseText('testDefinition.listRuns.status.completed'),
-	error: locale.baseText('testDefinition.listRuns.status.error'),
-	success: locale.baseText('testDefinition.listRuns.status.success'),
-	warning: locale.baseText('testDefinition.listRuns.status.warning'),
-	cancelled: locale.baseText('testDefinition.listRuns.status.cancelled'),
+	new: locale.baseText('evaluation.listRuns.status.new'),
+	running: locale.baseText('evaluation.listRuns.status.running'),
+	evaluation_running: locale.baseText('evaluation.listRuns.status.evaluating'),
+	completed: locale.baseText('evaluation.listRuns.status.completed'),
+	error: locale.baseText('evaluation.listRuns.status.error'),
+	success: locale.baseText('evaluation.listRuns.status.success'),
+	warning: locale.baseText('evaluation.listRuns.status.warning'),
+	cancelled: locale.baseText('evaluation.listRuns.status.cancelled'),
};
function getErrorTooltip(column: TestTableColumn<T>, row: T): string | undefined {

View File

@@ -112,7 +112,9 @@ defineSlots<{
:data="localData"
:border="true"
:cell-class-name="$style.customCell"
-	:row-class-name="$style.customRow"
+	:row-class-name="
+		({ row }) => (row?.status === 'error' ? $style.customDisabledRow : $style.customRow)
+	"
scrollbar-always-on
@selection-change="handleSelectionChange"
@header-dragend="handleColumnResize"
@@ -184,6 +186,11 @@ defineSlots<{
--color-table-row-hover-background: var(--color-background-light);
}
.customDisabledRow {
cursor: default;
--color-table-row-hover-background: var(--color-background-light);
}
.customHeaderCell {
display: flex;
gap: 4px;

View File

@@ -0,0 +1,59 @@
import type { BaseTextKey } from '@/plugins/i18n';
const TEST_CASE_EXECUTION_ERROR_CODE = {
MOCKED_NODE_NOT_FOUND: 'MOCKED_NODE_NOT_FOUND',
FAILED_TO_EXECUTE_WORKFLOW: 'FAILED_TO_EXECUTE_WORKFLOW',
INVALID_METRICS: 'INVALID_METRICS',
UNKNOWN_ERROR: 'UNKNOWN_ERROR',
NO_METRICS_COLLECTED: 'NO_METRICS_COLLECTED',
} as const;
export type TestCaseExecutionErrorCodes =
(typeof TEST_CASE_EXECUTION_ERROR_CODE)[keyof typeof TEST_CASE_EXECUTION_ERROR_CODE];
const TEST_RUN_ERROR_CODES = {
TEST_CASES_NOT_FOUND: 'TEST_CASES_NOT_FOUND',
INTERRUPTED: 'INTERRUPTED',
UNKNOWN_ERROR: 'UNKNOWN_ERROR',
EVALUATION_TRIGGER_NOT_FOUND: 'EVALUATION_TRIGGER_NOT_FOUND',
EVALUATION_TRIGGER_NOT_CONFIGURED: 'EVALUATION_TRIGGER_NOT_CONFIGURED',
EVALUATION_TRIGGER_DISABLED: 'EVALUATION_TRIGGER_DISABLED',
SET_OUTPUTS_NODE_NOT_FOUND: 'SET_OUTPUTS_NODE_NOT_FOUND',
SET_OUTPUTS_NODE_NOT_CONFIGURED: 'SET_OUTPUTS_NODE_NOT_CONFIGURED',
SET_METRICS_NODE_NOT_FOUND: 'SET_METRICS_NODE_NOT_FOUND',
SET_METRICS_NODE_NOT_CONFIGURED: 'SET_METRICS_NODE_NOT_CONFIGURED',
CANT_FETCH_TEST_CASES: 'CANT_FETCH_TEST_CASES',
PARTIAL_CASES_FAILED: 'PARTIAL_CASES_FAILED',
} as const;
export type TestRunErrorCode = (typeof TEST_RUN_ERROR_CODES)[keyof typeof TEST_RUN_ERROR_CODES];
const testCaseErrorDictionary: Partial<Record<TestCaseExecutionErrorCodes, BaseTextKey>> = {
MOCKED_NODE_NOT_FOUND: 'evaluation.runDetail.error.mockedNodeMissing',
FAILED_TO_EXECUTE_WORKFLOW: 'evaluation.runDetail.error.executionFailed',
INVALID_METRICS: 'evaluation.runDetail.error.invalidMetrics',
UNKNOWN_ERROR: 'evaluation.runDetail.error.unknownError',
NO_METRICS_COLLECTED: 'evaluation.runDetail.error.noMetricsCollected',
} as const;
const testRunErrorDictionary: Partial<Record<TestRunErrorCode, BaseTextKey>> = {
TEST_CASES_NOT_FOUND: 'evaluation.listRuns.error.testCasesNotFound',
INTERRUPTED: 'evaluation.listRuns.error.executionInterrupted',
UNKNOWN_ERROR: 'evaluation.listRuns.error.unknownError',
EVALUATION_TRIGGER_NOT_FOUND: 'evaluation.listRuns.error.evaluationTriggerNotFound',
EVALUATION_TRIGGER_NOT_CONFIGURED: 'evaluation.listRuns.error.evaluationTriggerNotConfigured',
EVALUATION_TRIGGER_DISABLED: 'evaluation.listRuns.error.evaluationTriggerDisabled',
SET_OUTPUTS_NODE_NOT_FOUND: 'evaluation.listRuns.error.setOutputsNodeNotFound',
SET_OUTPUTS_NODE_NOT_CONFIGURED: 'evaluation.listRuns.error.setOutputsNodeNotConfigured',
SET_METRICS_NODE_NOT_FOUND: 'evaluation.listRuns.error.setMetricsNodeNotFound',
SET_METRICS_NODE_NOT_CONFIGURED: 'evaluation.listRuns.error.setMetricsNodeNotConfigured',
CANT_FETCH_TEST_CASES: 'evaluation.listRuns.error.cantFetchTestCases',
PARTIAL_CASES_FAILED: 'evaluation.runDetail.error.partialCasesFailed',
} as const;
export const getErrorBaseKey = (errorCode?: string): string => {
return (
testCaseErrorDictionary[errorCode as TestCaseExecutionErrorCodes] ??
testRunErrorDictionary[errorCode as TestRunErrorCode] ??
''
);
};
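
getErrorBaseKey checks the test-case dictionary first, then the test-run dictionary, and falls back to an empty string, so callers can treat a falsy result as "no specific copy". Note that UNKNOWN_ERROR appears in both dictionaries and resolves to the test-case entry because that lookup runs first. Expected lookups, based on the tables above:

getErrorBaseKey('INVALID_METRICS'); // 'evaluation.runDetail.error.invalidMetrics'
getErrorBaseKey('INTERRUPTED'); // 'evaluation.listRuns.error.executionInterrupted'
getErrorBaseKey('UNKNOWN_ERROR'); // 'evaluation.runDetail.error.unknownError' (test-case table wins)
getErrorBaseKey('SOMETHING_ELSE'); // '' — caller falls back to generic copy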

View File

@@ -0,0 +1,34 @@
import type { TestRunRecord } from '@/api/evaluation.ee';
import type { IconColor } from '@n8n/design-system/types/icon';
export const statusDictionary: Record<TestRunRecord['status'], { icon: string; color: IconColor }> =
{
new: {
icon: 'status-new',
color: 'foreground-xdark',
},
running: {
icon: 'spinner',
color: 'secondary',
},
completed: {
icon: 'status-completed',
color: 'success',
},
error: {
icon: 'exclamation-triangle',
color: 'danger',
},
cancelled: {
icon: 'status-canceled',
color: 'foreground-xdark',
},
warning: {
icon: 'status-warning',
color: 'warning',
},
success: {
icon: 'status-completed',
color: 'success',
},
};
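
A consumer only needs to index the dictionary; the running state is typically special-cased to an animated spinner, as in the run table earlier in this commit. A usage sketch reusing the dictionary and types defined above (the helper name is hypothetical):

// Resolve the static icon/color pair for a run's status badge.
function statusVisuals(status: TestRunRecord['status']) {
	return statusDictionary[status]; // e.g. 'error' -> { icon: 'exclamation-triangle', color: 'danger' }
}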

View File

@@ -1,13 +1,13 @@
import { describe, it, expect } from 'vitest';
import { useMetricsChart } from '../composables/useMetricsChart';
-import type { TestRunRecord as Record } from '@/api/testDefinition.ee';
+import type { TestRunRecord as Record } from '@/api/evaluation.ee';
type TestRunRecord = Record & { index: number };
describe('useMetricsChart', () => {
const mockRuns: TestRunRecord[] = [
{
id: '1',
-	testDefinitionId: 'test1',
+	workflowId: 'workflow1',
status: 'completed',
createdAt: '2025-01-06T10:00:00Z',
updatedAt: '2025-01-06T10:00:00Z',
@@ -18,7 +18,7 @@ describe('useMetricsChart', () => {
},
{
id: '2',
-	testDefinitionId: 'test1',
+	workflowId: 'workflow1',
status: 'completed',
createdAt: '2025-01-06T10:00:00Z',
updatedAt: '2025-01-06T10:00:00Z',

View File

@@ -49,12 +49,7 @@ const githubButtonHidden = useLocalStorage(LOCAL_STORAGE_HIDE_GITHUB_STAR_BUTTON
// This is used to determine which tab to show when the route changes
// TODO: It might be easier to manage this in the router config, by passing meta information to the routes
// This would allow us to specify it just once on the root route, and then have the tabs be determined for children
-const testDefinitionRoutes: VIEWS[] = [
-	VIEWS.TEST_DEFINITION,
-	VIEWS.TEST_DEFINITION_EDIT,
-	VIEWS.TEST_DEFINITION_RUNS_DETAIL,
-	VIEWS.TEST_DEFINITION_RUNS_COMPARE,
-];
+const evaluationRoutes: VIEWS[] = [VIEWS.EVALUATION_EDIT, VIEWS.EVALUATION_RUNS_DETAIL];
const workflowRoutes: VIEWS[] = [VIEWS.WORKFLOW, VIEWS.NEW_WORKFLOW, VIEWS.EXECUTION_DEBUG];
@@ -71,7 +66,7 @@ const tabBarItems = computed(() => {
if (posthogStore.isFeatureEnabled(WORKFLOW_EVALUATION_EXPERIMENT)) {
items.push({
-			value: MAIN_HEADER_TABS.TEST_DEFINITION,
+			value: MAIN_HEADER_TABS.EVALUATION,
label: locale.baseText('generic.tests'),
});
}
@@ -126,14 +121,14 @@ onMounted(async () => {
function isViewRoute(name: unknown): name is VIEWS {
return (
typeof name === 'string' &&
-		[testDefinitionRoutes, workflowRoutes, executionRoutes].flat().includes(name as VIEWS)
+		[evaluationRoutes, workflowRoutes, executionRoutes].flat().includes(name as VIEWS)
);
}
function syncTabsWithRoute(to: RouteLocation, from?: RouteLocation): void {
// Map route types to their corresponding tab in the header
const routeTabMapping = [
-		{ routes: testDefinitionRoutes, tab: MAIN_HEADER_TABS.TEST_DEFINITION },
+		{ routes: evaluationRoutes, tab: MAIN_HEADER_TABS.EVALUATION },
{ routes: executionRoutes, tab: MAIN_HEADER_TABS.EXECUTIONS },
{ routes: workflowRoutes, tab: MAIN_HEADER_TABS.WORKFLOW },
];
@@ -172,9 +167,8 @@ function onTabSelected(tab: MAIN_HEADER_TABS, event: MouseEvent) {
void navigateToExecutionsView(openInNewTab);
break;
-		case MAIN_HEADER_TABS.TEST_DEFINITION:
-			activeHeaderTab.value = MAIN_HEADER_TABS.TEST_DEFINITION;
-			void router.push({ name: VIEWS.TEST_DEFINITION });
+		case MAIN_HEADER_TABS.EVALUATION:
+			void navigateToEvaluationsView(openInNewTab);
break;
default:
@@ -230,6 +224,25 @@ async function navigateToExecutionsView(openInNewTab: boolean) {
}
}
async function navigateToEvaluationsView(openInNewTab: boolean) {
const routeWorkflowId =
workflowId.value === PLACEHOLDER_EMPTY_WORKFLOW_ID ? 'new' : workflowId.value;
const routeToNavigateTo: RouteLocationRaw = {
name: VIEWS.EVALUATION_EDIT,
params: { name: routeWorkflowId },
};
if (openInNewTab) {
const { href } = router.resolve(routeToNavigateTo);
window.open(href, '_blank');
} else if (route.name !== routeToNavigateTo.name) {
dirtyState.value = uiStore.stateIsDirty;
workflowToReturnTo.value = workflowId.value;
activeHeaderTab.value = MAIN_HEADER_TABS.EVALUATION;
await router.push(routeToNavigateTo);
}
}
function hideGithubButton() {
githubButtonHidden.value = true;
}
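
navigateToEvaluationsView applies the same placeholder-to-'new' normalization used elsewhere in this commit before resolving the route. The mapping in isolation (a sketch; the sentinel value comes from PLACEHOLDER_EMPTY_WORKFLOW_ID):

// Unsaved workflows route to the literal 'new' segment instead of the placeholder id.
function toRouteWorkflowId(workflowId: string, placeholder: string): string {
	return workflowId === placeholder ? 'new' : workflowId;
}

// toRouteWorkflowId(PLACEHOLDER_EMPTY_WORKFLOW_ID, PLACEHOLDER_EMPTY_WORKFLOW_ID) -> 'new'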

View File

@@ -7,6 +7,7 @@ import {
REGULAR_NODE_CREATOR_VIEW,
TRIGGER_NODE_CREATOR_VIEW,
AI_UNCATEGORIZED_CATEGORY,
AI_EVALUATION,
} from '@/constants';
import { useNodeCreatorStore } from '@/stores/nodeCreator.store';
@@ -126,6 +127,7 @@ watch(
[AI_NODE_CREATOR_VIEW]: AIView,
[AI_OTHERS_NODE_CREATOR_VIEW]: AINodesView,
[AI_UNCATEGORIZED_CATEGORY]: AINodesView,
[AI_EVALUATION]: AINodesView,
};
const itemKey = selectedView;

View File

@@ -4,7 +4,6 @@ import {
AI_CATEGORY_TOOLS,
AI_SUBCATEGORY,
CUSTOM_API_CALL_KEY,
-	EVALUATION_TRIGGER,
HTTP_REQUEST_NODE_TYPE,
} from '@/constants';
import { memoize, startCase } from 'lodash-es';
@@ -20,7 +19,7 @@ import { i18n } from '@/plugins/i18n';
import { getCredentialOnlyNodeType } from '@/utils/credentialOnlyNodes';
import { formatTriggerActionName } from '../utils';
-import { usePostHog } from '@/stores/posthog.store';
+import { useEvaluationStore } from '@/stores/evaluation.store.ee';
const PLACEHOLDER_RECOMMENDED_ACTION_KEY = 'placeholder_recommended';
@@ -332,15 +331,10 @@ export function useActionsGenerator() {
nodeTypes: INodeTypeDescription[],
httpOnlyCredentials: ICredentialType[],
) {
-	const posthogStore = usePostHog();
-	const isEvaluationVariantEnabled = posthogStore.isVariantEnabled(
-		EVALUATION_TRIGGER.name,
-		EVALUATION_TRIGGER.variant,
-	);
+	const evaluationStore = useEvaluationStore();
	const visibleNodeTypes = nodeTypes.filter((node) => {
-		if (isEvaluationVariantEnabled) {
+		if (evaluationStore.isEvaluationEnabled) {
return true;
}
return (

View File

@@ -404,7 +404,7 @@ describe('useActionsGenerator', () => {
});
it('should not return evaluation or evaluation trigger node if variant is not enabled', () => {
-		vi.spyOn(posthogStore, 'isVariantEnabled').mockReturnValue(false);
+		vi.spyOn(posthogStore, 'isFeatureEnabled').mockReturnValue(false);
const node: INodeTypeDescription = {
...baseV2NodeWoProps,

View File

@@ -57,20 +57,17 @@ import {
AI_CODE_TOOL_LANGCHAIN_NODE_TYPE,
AI_WORKFLOW_TOOL_LANGCHAIN_NODE_TYPE,
HUMAN_IN_THE_LOOP_CATEGORY,
-	EVALUATION_TRIGGER,
} from '@/constants';
import { useI18n } from '@/composables/useI18n';
import { useNodeTypesStore } from '@/stores/nodeTypes.store';
import type { SimplifiedNodeType } from '@/Interface';
import type { INodeTypeDescription, Themed } from 'n8n-workflow';
+import type { INodeTypeDescription, NodeConnectionType, Themed } from 'n8n-workflow';
import { EVALUATION_TRIGGER_NODE_TYPE, NodeConnectionTypes } from 'n8n-workflow';
-import type { NodeConnectionType } from 'n8n-workflow';
import { useTemplatesStore } from '@/stores/templates.store';
import type { BaseTextKey } from '@/plugins/i18n';
import { camelCase } from 'lodash-es';
import { useSettingsStore } from '@/stores/settings.store';
-import { usePostHog } from '@/stores/posthog.store';
+import { useEvaluationStore } from '@/stores/evaluation.store.ee';
export interface NodeViewItemSection {
key: string;
title: string;
@@ -169,14 +166,10 @@ export function AIView(_nodes: SimplifiedNodeType[]): NodeView {
const i18n = useI18n();
const nodeTypesStore = useNodeTypesStore();
const templatesStore = useTemplatesStore();
-	const posthogStore = usePostHog();
+	const evaluationStore = useEvaluationStore();
+	const isEvaluationEnabled = evaluationStore.isEvaluationEnabled;
-	const isEvaluationVariantEnabled = posthogStore.isVariantEnabled(
-		EVALUATION_TRIGGER.name,
-		EVALUATION_TRIGGER.variant,
-	);
-	const evaluationNode = getEvaluationNode(nodeTypesStore, isEvaluationVariantEnabled);
+	const evaluationNode = getEvaluationNode(nodeTypesStore, isEvaluationEnabled);
const chainNodes = getAiNodesBySubcategory(nodeTypesStore.allLatestNodeTypes, AI_CATEGORY_CHAINS);
const agentNodes = getAiNodesBySubcategory(nodeTypesStore.allLatestNodeTypes, AI_CATEGORY_AGENTS);
@@ -368,13 +361,10 @@ export function AINodesView(_nodes: SimplifiedNodeType[]): NodeView {
export function TriggerView() {
const i18n = useI18n();
-	const posthogStore = usePostHog();
-	const isEvaluationVariantEnabled = posthogStore.isVariantEnabled(
-		EVALUATION_TRIGGER.name,
-		EVALUATION_TRIGGER.variant,
-	);
+	const evaluationStore = useEvaluationStore();
+	const isEvaluationEnabled = evaluationStore.isEvaluationEnabled;
-	const evaluationTriggerNode = isEvaluationVariantEnabled
+	const evaluationTriggerNode = isEvaluationEnabled
? {
key: EVALUATION_TRIGGER_NODE_TYPE,
type: 'node',

View File

@@ -1,32 +0,0 @@
<script setup lang="ts"></script>
<template>
<div :class="$style.blockArrow">
<div :class="$style.stalk"></div>
<div :class="$style.arrowHead"></div>
</div>
</template>
<style module lang="scss">
.blockArrow {
display: flex;
flex-direction: column;
align-items: center;
}
.stalk {
min-height: 14px;
width: 2px;
background-color: var(--color-foreground-xdark);
flex: 1;
}
.arrowHead {
width: 0;
height: 0;
border-left: 5px solid transparent;
border-right: 5px solid transparent;
border-top: 10px solid var(--color-foreground-xdark);
}
</style>

View File

@@ -1,84 +0,0 @@
<script setup lang="ts">
import { useI18n } from '@/composables/useI18n';
import type { EditableField } from '../types';
interface Props {
modelValue: EditableField<string>;
startEditing: (field: 'description') => void;
saveChanges: (field: 'description') => void;
handleKeydown: (e: KeyboardEvent, field: 'description') => void;
}
defineProps<Props>();
defineEmits<{ 'update:modelValue': [value: EditableField<string>] }>();
const locale = useI18n();
</script>
<template>
<div :class="$style.description">
<template v-if="!modelValue.isEditing">
<span :class="$style.descriptionText" @click="startEditing('description')">
<n8n-icon
v-if="modelValue.value.length === 0"
:class="$style.icon"
icon="plus"
color="text-light"
size="medium"
/>
<N8nText size="medium">
{{ modelValue.value.length > 0 ? modelValue.value : 'Add a description' }}
</N8nText>
</span>
<n8n-icon-button
:class="$style.editInputButton"
icon="pen"
type="tertiary"
@click="startEditing('description')"
/>
</template>
<N8nInput
v-else
ref="descriptionInput"
data-test-id="evaluation-description-input"
:model-value="modelValue.tempValue"
type="textarea"
:placeholder="locale.baseText('testDefinition.edit.descriptionPlaceholder')"
@update:model-value="$emit('update:modelValue', { ...modelValue, tempValue: $event })"
@blur="() => saveChanges('description')"
@keydown="(e: KeyboardEvent) => handleKeydown(e, 'description')"
/>
</div>
</template>
<style module lang="scss">
.description {
display: flex;
align-items: center;
color: var(--color-text-light);
font-size: var(--font-size-s);
&:hover {
.editInputButton {
opacity: 1;
}
}
}
.descriptionText {
display: block;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
.icon {
margin-right: var(--spacing-2xs);
}
}
.editInputButton {
--button-font-color: var(--prim-gray-490);
opacity: 0;
border: none;
}
</style>

View File

@@ -1,219 +0,0 @@
<script setup lang="ts">
import { useI18n } from '@/composables/useI18n';
import { type Modifier, detectOverflow } from '@popperjs/core';
import { N8nInfoTip, N8nText, N8nTooltip } from '@n8n/design-system';
import { computed, ref, useCssModule } from 'vue';
interface EvaluationStep {
title?: string;
warning?: boolean;
expanded?: boolean;
description?: string;
issues?: Array<{ field: string; message: string }>;
showIssues?: boolean;
tooltip: string;
externalTooltip?: boolean;
}
const props = withDefaults(defineProps<EvaluationStep>(), {
description: '',
warning: false,
expanded: false,
issues: () => [],
showIssues: true,
title: '',
});
const locale = useI18n();
const isExpanded = ref(props.expanded);
const $style = useCssModule();
const hasIssues = computed(() => props.issues.length > 0);
const containerClass = computed(() => {
return {
[$style.evaluationStep]: true,
[$style['has-issues']]: true,
};
});
const toggleExpand = () => (isExpanded.value = !isExpanded.value);
const renderIssues = computed(() => props.showIssues && props.issues.length);
const issuesList = computed(() => props.issues.map((issue) => issue.message).join(', '));
/**
* @see https://popper.js.org/docs/v2/modifiers/#custom-modifiers
*/
const resizeModifier: Modifier<'resize', {}> = {
name: 'resize',
enabled: true,
phase: 'beforeWrite',
requires: ['preventOverflow'],
fn({ state }) {
const overflow = detectOverflow(state);
const MARGIN_RIGHT = 15;
const maxWidth = state.rects.popper.width - overflow.right - MARGIN_RIGHT;
state.styles.popper.width = `${maxWidth}px`;
},
};
const popperModifiers = [
resizeModifier,
{ name: 'preventOverflow', options: { boundary: 'document' } },
{ name: 'flip', enabled: false }, // prevent the tooltip from flipping
];
</script>
<template>
<div :class="containerClass" data-test-id="evaluation-step">
<div :class="$style.content">
<N8nTooltip
placement="right"
:disabled="!externalTooltip"
:show-arrow="false"
:popper-class="$style.evaluationTooltip"
:popper-options="{ modifiers: popperModifiers }"
:content="tooltip"
>
<div :class="$style.header" @click="toggleExpand">
<div :class="$style.label">
<N8nText bold>
<slot v-if="$slots.title" name="title" />
<template v-else>{{ title }}</template>
</N8nText>
<N8nInfoTip
v-if="!externalTooltip"
:class="$style.infoTip"
:bold="true"
type="tooltip"
theme="info"
tooltip-placement="top"
:enterable="false"
>
{{ tooltip }}
</N8nInfoTip>
</div>
<div :class="$style.actions">
<N8nInfoTip
v-if="renderIssues"
:bold="true"
type="tooltip"
theme="warning"
tooltip-placement="top"
:enterable="false"
>
{{ issuesList }}
</N8nInfoTip>
<N8nText
v-if="$slots.cardContent"
data-test-id="evaluation-step-collapse-button"
size="xsmall"
:color="hasIssues ? 'primary' : 'text-base'"
bold
>
{{
isExpanded
? locale.baseText('testDefinition.edit.step.collapse')
: locale.baseText('testDefinition.edit.step.configure')
}}
<font-awesome-icon :icon="isExpanded ? 'angle-up' : 'angle-down'" size="lg" />
</N8nText>
</div>
</div>
</N8nTooltip>
<div v-if="$slots.cardContent && isExpanded" :class="$style.cardContentWrapper">
<div :class="$style.cardContent" data-test-id="evaluation-step-content">
<N8nText v-if="description" size="small" color="text-light">{{ description }}</N8nText>
<slot name="cardContent" />
</div>
</div>
</div>
</div>
</template>
<style module lang="scss">
.evaluationStep {
display: grid;
grid-template-columns: 1fr;
background: var(--color-background-xlight);
border-radius: var(--border-radius-large);
border: var(--border-base);
width: 100%;
color: var(--color-text-dark);
}
.evaluationTooltip {
&:global(.el-popper) {
background-color: transparent;
font-size: var(--font-size-xs);
color: var(--color-text-light);
line-height: 1rem;
max-width: 25rem;
}
}
.icon {
display: flex;
align-items: center;
justify-content: center;
border-radius: var(--border-radius-base);
overflow: hidden;
width: 2rem;
height: 2rem;
&.warning {
background-color: var(--color-warning-tint-2);
}
}
.content {
display: grid;
}
.header {
display: flex;
gap: var(--spacing-2xs);
align-items: center;
cursor: pointer;
padding: var(--spacing-s);
}
.label {
display: flex;
align-items: center;
gap: var(--spacing-4xs);
}
.infoTip {
opacity: 0;
}
.evaluationStep:hover .infoTip {
opacity: 1;
}
.actions {
margin-left: auto;
display: flex;
gap: var(--spacing-2xs);
}
.cardContent {
font-size: var(--font-size-s);
padding: 0 var(--spacing-s);
margin: var(--spacing-s) 0;
}
.cardContentWrapper {
border-top: var(--border-base);
}
.has-issues {
/**
* This comment is needed or the css module
* will interpret as undefined
*/
}
</style>

View File

@@ -1,247 +0,0 @@
<script setup lang="ts">
import Canvas from '@/components/canvas/Canvas.vue';
import CanvasNode from '@/components/canvas/elements/nodes/CanvasNode.vue';
import { useCanvasMapping } from '@/composables/useCanvasMapping';
import { useCanvasOperations } from '@/composables/useCanvasOperations';
import { useI18n } from '@/composables/useI18n';
import { useTelemetry } from '@/composables/useTelemetry';
import { useNodeTypesStore } from '@/stores/nodeTypes.store';
import { useWorkflowsStore } from '@/stores/workflows.store';
import type { CanvasConnectionPort, CanvasNodeData } from '@/types';
import { N8nButton, N8nHeading, N8nSpinner, N8nText, N8nTooltip } from '@n8n/design-system';
import { useVueFlow } from '@vue-flow/core';
import { computed, onMounted, ref } from 'vue';
import { useRoute, useRouter } from 'vue-router';
const workflowsStore = useWorkflowsStore();
const nodeTypesStore = useNodeTypesStore();
const route = useRoute();
const router = useRouter();
const locale = useI18n();
const telemetry = useTelemetry();
const { resetWorkspace, initializeWorkspace } = useCanvasOperations({ router });
const uuid = crypto.randomUUID();
type PinnedNode = { name: string; id: string };
const model = defineModel<PinnedNode[]>({ required: true });
const isLoading = ref(false);
const workflowId = computed(() => route.params.name as string);
const testId = computed(() => route.params.testId as string);
const workflow = computed(() => workflowsStore.getWorkflowById(workflowId.value));
const workflowObject = computed(() => workflowsStore.getCurrentWorkflow(true));
const canvasId = computed(() => `${uuid}-${testId.value}`);
const { onNodesInitialized, fitView, zoomTo, onNodeClick, viewport } = useVueFlow({
id: canvasId.value,
});
const nodes = computed(() => workflow.value.nodes ?? []);
const connections = computed(() => workflow.value.connections);
const { nodes: mappedNodes, connections: mappedConnections } = useCanvasMapping({
nodes,
connections,
workflowObject,
});
async function loadData() {
isLoading.value = true;
workflowsStore.resetState();
resetWorkspace();
await Promise.all([
nodeTypesStore.getNodeTypes(),
workflowsStore.fetchWorkflow(workflowId.value),
]);
// remove editor pinned data
workflow.value.pinData = {};
initializeWorkspace(workflow.value);
}
function getNodeNameById(id: string) {
return mappedNodes.value.find((node) => node.id === id)?.data?.name;
}
function isMocked(data: CanvasNodeData) {
return model.value.some((node) => node.id === data.id);
}
function canBeMocked(outputs: CanvasConnectionPort[], inputs: CanvasConnectionPort[]) {
return outputs.length === 1 && inputs.length >= 1;
}
function handleNodeClick(data: CanvasNodeData) {
const nodeName = getNodeNameById(data.id);
if (!nodeName || !canBeMocked(data.outputs, data.inputs)) return;
const mocked = isMocked(data);
model.value = mocked
? model.value.filter((node) => node.id !== data.id)
: model.value.concat({ name: nodeName, id: data.id });
if (!mocked) {
telemetry.track('User selected node to be mocked', {
node_id: data.id,
test_id: testId.value,
});
}
}
function tooltipContent(data: CanvasNodeData) {
if (nodeTypesStore.isTriggerNode(data.type)) {
return locale.baseText('testDefinition.edit.nodesPinning.triggerTooltip');
}
if (!canBeMocked(data.outputs, data.inputs)) {
return;
}
if (isMocked(data)) {
return locale.baseText('testDefinition.edit.nodesPinning.pinButtonTooltip.pinned');
} else {
return locale.baseText('testDefinition.edit.nodesPinning.pinButtonTooltip');
}
}
function tooltipOffset(data: CanvasNodeData) {
if (nodeTypesStore.isTriggerNode(data.type)) return;
return 45 * viewport.value.zoom;
}
function tooltipProps(data: CanvasNodeData) {
const content = tooltipContent(data);
return {
disabled: !content,
content,
offset: tooltipOffset(data),
};
}
onNodeClick(({ node }) => handleNodeClick(node.data));
onNodesInitialized(async () => {
await fitView();
await zoomTo(0.7);
// Wait for the zoom to be applied and the canvas edges to recompute
await new Promise((resolve) => setTimeout(resolve, 400));
isLoading.value = false;
});
onMounted(loadData);
</script>
<template>
<div v-if="mappedNodes.length === 0" :class="$style.noNodes">
<N8nHeading size="large" :bold="true" :class="$style.noNodesTitle">
{{ locale.baseText('testDefinition.edit.pinNodes.noNodes.title') }}
</N8nHeading>
<N8nText>{{ locale.baseText('testDefinition.edit.pinNodes.noNodes.description') }}</N8nText>
</div>
<div v-else :class="$style.container">
<N8nSpinner v-if="isLoading" size="large" type="dots" :class="$style.spinner" />
<Canvas
:id="canvasId"
:loading="isLoading"
:nodes="mappedNodes"
:connections="mappedConnections"
:show-bug-reporting-button="false"
:read-only="true"
>
<template #node="{ nodeProps }">
<N8nTooltip placement="top" v-bind="tooltipProps(nodeProps.data)">
<CanvasNode
v-bind="nodeProps"
:class="{
[$style.isTrigger]: nodeTypesStore.isTriggerNode(nodeProps.data.type),
[$style.mockNode]: true,
}"
>
<template #toolbar="{ data, outputs, inputs }">
<div
v-if="canBeMocked(outputs, inputs)"
:class="{
[$style.pinButtonContainer]: true,
[$style.pinButtonContainerPinned]: isMocked(data),
}"
>
<N8nButton
icon="thumbtack"
block
type="secondary"
:class="{ [$style.customSecondary]: isMocked(data) }"
data-test-id="node-pin-button"
>
<template v-if="isMocked(data)">
{{ locale.baseText('contextMenu.unpin') }}
</template>
<template v-else> {{ locale.baseText('contextMenu.pin') }}</template>
</N8nButton>
</div>
</template>
</CanvasNode>
</N8nTooltip>
</template>
</Canvas>
</div>
</template>
<style lang="scss" module>
.mockNode {
// remove selection outline
--color-canvas-selected-transparent: transparent;
}
.isTrigger {
--canvas-node--border-color: var(--color-secondary);
}
.container {
width: 100%;
height: 100%;
border: 1px solid var(--color-foreground-light);
border-radius: 8px;
}
.pinButtonContainer {
position: absolute;
right: 50%;
bottom: -5px;
height: calc(100% + 47px);
border: 1px solid transparent;
padding: 5px 5px;
border-radius: 8px;
width: calc(100% + 10px);
transform: translateX(50%);
&.pinButtonContainerPinned {
background-color: var(--color-secondary);
}
}
.customSecondary {
--button-background-color: var(--color-secondary);
--button-font-color: var(--color-button-primary-font);
--button-border-color: var(--color-secondary);
--button-hover-background-color: var(--color-secondary);
--button-hover-border-color: var(--color-button-primary-font);
--button-hover-font-color: var(--color-button-primary-font);
}
.spinner {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
}
.noNodes {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
height: 100%;
}
</style>

View File

@@ -1,113 +0,0 @@
<script setup lang="ts">
import { useI18n } from '@/composables/useI18n';
import type { ITag } from '@/Interface';
import { createEventBus } from '@n8n/utils/event-bus';
import { computed } from 'vue';
import type { EditableField } from '../types';
export interface TagsInputProps {
modelValue: EditableField<string[]>;
allTags: ITag[];
tagsById: Record<string, ITag>;
isLoading: boolean;
startEditing: (field: 'tags') => void;
saveChanges: (field: 'tags') => void;
cancelEditing: (field: 'tags') => void;
createTag?: (name: string) => Promise<ITag>;
}
const props = withDefaults(defineProps<TagsInputProps>(), {
modelValue: () => ({
isEditing: false,
value: [],
tempValue: [],
}),
createTag: undefined,
});
const emit = defineEmits<{ 'update:modelValue': [value: TagsInputProps['modelValue']] }>();
const locale = useI18n();
const tagsEventBus = createEventBus();
/**
* Compute the tag name by ID
*/
const getTagName = computed(() => (tagId: string) => {
return props.tagsById[tagId]?.name ?? '';
});
/**
* Update the tempValue of the tags when the dropdown changes.
* This does not finalize the changes; that happens on blur or hitting enter.
*/
function updateTags(tags: string[]) {
emit('update:modelValue', {
...props.modelValue,
tempValue: tags,
});
}
</script>
<template>
<div data-test-id="workflow-tags-field">
<n8n-input-label
:label="locale.baseText('testDefinition.edit.tagName')"
:bold="false"
size="small"
>
<!-- Read-only view -->
<div v-if="!modelValue.isEditing" :class="$style.tagsRead" @click="startEditing('tags')">
<n8n-text v-if="modelValue.value.length === 0" size="small">
{{ locale.baseText('testDefinition.edit.selectTag') }}
</n8n-text>
<n8n-tag
v-for="tagId in modelValue.value"
:key="tagId"
:text="getTagName(tagId)"
data-test-id="evaluation-tag-field"
/>
<n8n-icon-button
:class="$style.editInputButton"
icon="pen"
type="tertiary"
size="small"
transparent
/>
</div>
<!-- Editing view -->
<TagsDropdown
v-else
:model-value="modelValue.tempValue"
:placeholder="locale.baseText('executionAnnotationView.chooseOrCreateATag')"
:create-enabled="modelValue.tempValue.length === 0"
:all-tags="allTags"
:is-loading="isLoading"
:tags-by-id="tagsById"
data-test-id="workflow-tags-dropdown"
:event-bus="tagsEventBus"
:create-tag="createTag"
:manage-enabled="false"
:multiple-limit="1"
@update:model-value="updateTags"
@esc="cancelEditing('tags')"
@blur="saveChanges('tags')"
/>
</n8n-input-label>
</div>
</template>
<style module lang="scss">
.tagsRead {
&:hover .editInputButton {
opacity: 1;
}
}
.editInputButton {
opacity: 0;
border: none;
--button-font-color: var(--prim-gray-490);
}
</style>

View File

@@ -1,69 +0,0 @@
<script setup lang="ts">
import { useI18n } from '@/composables/useI18n';
import type { EditableField } from '../types';
export interface EvaluationHeaderProps {
modelValue: EditableField<string>;
startEditing: (field: 'name') => void;
saveChanges: (field: 'name') => void;
handleKeydown: (e: KeyboardEvent, field: 'name') => void;
}
defineEmits<{ 'update:modelValue': [value: EditableField<string>] }>();
defineProps<EvaluationHeaderProps>();
const locale = useI18n();
</script>
<template>
<h2 :class="$style.title">
<template v-if="!modelValue.isEditing">
<span :class="$style.titleText">
{{ modelValue.value }}
</span>
<n8n-icon-button
:class="$style.editInputButton"
icon="pen"
type="tertiary"
@click="startEditing('name')"
/>
</template>
<N8nInput
v-else
ref="nameInput"
data-test-id="evaluation-name-input"
:model-value="modelValue.tempValue"
type="text"
:placeholder="locale.baseText('testDefinition.edit.namePlaceholder')"
@update:model-value="$emit('update:modelValue', { ...modelValue, tempValue: $event })"
@blur="() => saveChanges('name')"
@keydown="(e: KeyboardEvent) => handleKeydown(e, 'name')"
/>
</h2>
</template>
<style module lang="scss">
.title {
margin: 0;
font-size: var(--font-size-xl);
font-weight: var(--font-weight-bold);
color: var(--color-text-dark);
display: flex;
align-items: center;
max-width: 100%;
overflow: hidden;
.titleText {
display: block;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
}
.editInputButton {
--button-font-color: var(--prim-gray-490);
opacity: 0.2;
border: none;
}
</style>

View File

@@ -1,122 +0,0 @@
<script setup lang="ts">
import { useI18n } from '@/composables/useI18n';
import { VIEWS } from '@/constants';
import { SAMPLE_EVALUATION_WORKFLOW } from '@/constants.workflows';
import type { IWorkflowDataCreate } from '@/Interface';
import { useProjectsStore } from '@/stores/projects.store';
import { useWorkflowsStore } from '@/stores/workflows.store';
import { N8nButton, N8nLink } from '@n8n/design-system';
import type { INodeParameterResourceLocator, IPinData } from 'n8n-workflow';
import { computed, ref } from 'vue';
import { useRouter } from 'vue-router';
interface WorkflowSelectorProps {
modelValue: INodeParameterResourceLocator;
examplePinnedData?: IPinData;
sampleWorkflowName?: string;
}
const props = withDefaults(defineProps<WorkflowSelectorProps>(), {
modelValue: () => ({
mode: 'id',
value: '',
__rl: true,
}),
examplePinnedData: () => ({}),
sampleWorkflowName: undefined,
});
const emit = defineEmits<{
'update:modelValue': [value: WorkflowSelectorProps['modelValue']];
workflowCreated: [workflowId: string];
}>();
const locale = useI18n();
const projectStore = useProjectsStore();
const workflowsStore = useWorkflowsStore();
const router = useRouter();
const subworkflowName = computed(() => {
if (props.sampleWorkflowName) {
return locale.baseText('testDefinition.workflowInput.subworkflowName', {
interpolate: { name: props.sampleWorkflowName },
});
}
return locale.baseText('testDefinition.workflowInput.subworkflowName.default');
});
const sampleWorkflow = computed<IWorkflowDataCreate>(() => {
return {
...SAMPLE_EVALUATION_WORKFLOW,
name: subworkflowName.value,
pinData: props.examplePinnedData,
};
});
const selectorVisible = ref(false);
const updateModelValue = (value: INodeParameterResourceLocator) => emit('update:modelValue', value);
/**
* copy pasted from WorkflowSelectorParameterInput.vue
* but we should remove it from here
*/
const handleDefineEvaluation = async () => {
const projectId = projectStore.currentProjectId;
const workflowName = sampleWorkflow.value.name ?? 'My Sub-Workflow';
const sampleSubWorkflows = workflowsStore.allWorkflows.filter(
(w) => w.name && new RegExp(workflowName).test(w.name),
);
const workflow: IWorkflowDataCreate = {
...sampleWorkflow.value,
name: `${workflowName} ${sampleSubWorkflows.length + 1}`,
};
if (projectId) {
workflow.projectId = projectId;
}
const newWorkflow = await workflowsStore.createNewWorkflow(workflow);
const { href } = router.resolve({ name: VIEWS.WORKFLOW, params: { name: newWorkflow.id } });
updateModelValue({
...props.modelValue,
value: newWorkflow.id,
cachedResultName: workflow.name,
});
window.open(href, '_blank');
};
</script>
<template>
<div class="mt-xs">
<template v-if="!modelValue.value">
<N8nButton type="secondary" class="mb-xs" @click="handleDefineEvaluation">
{{ locale.baseText('testDefinition.workflow.createNew') }}
</N8nButton>
<N8nLink class="mb-xs" style="display: block" @click="selectorVisible = !selectorVisible">
{{ locale.baseText('testDefinition.workflow.createNew.or') }}
</N8nLink>
</template>
<WorkflowSelectorParameterInput
v-if="modelValue.value || selectorVisible"
:parameter="{
displayName: locale.baseText('testDefinition.edit.workflowSelectorDisplayName'),
name: 'workflowId',
type: 'workflowSelector',
default: '',
}"
:model-value="modelValue"
:display-title="locale.baseText('testDefinition.edit.workflowSelectorTitle')"
:is-value-expression="false"
:expression-edit-dialog-visible="false"
:path="'workflows'"
:allow-new="false"
:sample-workflow="sampleWorkflow"
:new-resource-label="locale.baseText('testDefinition.workflow.createNew')"
@update:model-value="updateModelValue"
@workflow-created="emit('workflowCreated', $event)"
/>
</div>
</template>

View File

@@ -1,216 +0,0 @@
<script setup lang="ts">
import BlockArrow from '@/components/TestDefinition/EditDefinition/BlockArrow.vue';
import EvaluationStep from '@/components/TestDefinition/EditDefinition/EvaluationStep.vue';
import NodesPinning from '@/components/TestDefinition/EditDefinition/NodesPinning.vue';
import WorkflowSelector from '@/components/TestDefinition/EditDefinition/WorkflowSelector.vue';
import type { EditableFormState, EvaluationFormState } from '@/components/TestDefinition/types';
import { useI18n } from '@/composables/useI18n';
import { useMessage } from '@/composables/useMessage';
import { NODE_PINNING_MODAL_KEY } from '@/constants';
import type { ITag } from '@/Interface';
import { N8nButton, N8nHeading, N8nTag, N8nText } from '@n8n/design-system';
import type { IPinData } from 'n8n-workflow';
import { computed } from 'vue';
const props = defineProps<{
tagsById: Record<string, ITag>;
isLoading: boolean;
examplePinnedData?: IPinData;
sampleWorkflowName?: string;
hasRuns: boolean;
getFieldIssues: (key: string) => Array<{ field: string; message: string }>;
startEditing: (field: keyof EditableFormState) => void;
saveChanges: (field: keyof EditableFormState) => void;
cancelEditing: (field: keyof EditableFormState) => void;
}>();
const emit = defineEmits<{
openPinningModal: [];
openExecutionsViewForTag: [];
renameTag: [tag: string];
evaluationWorkflowCreated: [workflowId: string];
}>();
const locale = useI18n();
const tags = defineModel<EvaluationFormState['tags']>('tags', { required: true });
const renameTag = async () => {
const { prompt } = useMessage();
const result = await prompt(locale.baseText('testDefinition.edit.step.tag.placeholder'), {
inputValue: props.tagsById[tags.value.value[0]]?.name,
inputPlaceholder: locale.baseText('testDefinition.edit.step.tag.placeholder'),
inputValidator: (value) => {
if (!value) {
return locale.baseText('testDefinition.edit.step.tag.validation.required');
}
if (value.length > 21) {
return locale.baseText('testDefinition.edit.step.tag.validation.tooLong');
}
return true;
},
});
if (result?.action === 'confirm') {
emit('renameTag', result.value);
}
};
const evaluationWorkflow = defineModel<EvaluationFormState['evaluationWorkflow']>(
'evaluationWorkflow',
{ required: true },
);
const mockedNodes = defineModel<EvaluationFormState['mockedNodes']>('mockedNodes', {
required: true,
});
const selectedTag = computed(() => props.tagsById[tags.value.value[0]] ?? {});
function openExecutionsView() {
emit('openExecutionsViewForTag');
}
</script>
<template>
<div>
<div :class="$style.editForm">
<template v-if="!hasRuns">
<N8nText tag="div" color="text-dark" size="large" class="text-center">
{{ locale.baseText('testDefinition.edit.step.intro') }}
</N8nText>
<BlockArrow class="mt-5xs mb-5xs" />
</template>
<!-- Select Executions -->
<EvaluationStep
:issues="getFieldIssues('tags')"
:tooltip="locale.baseText('testDefinition.edit.step.executions.tooltip')"
:external-tooltip="!hasRuns"
>
<template #title>
{{
locale.baseText('testDefinition.edit.step.executions', {
adjustToNumber: selectedTag?.usageCount ?? 0,
})
}}
</template>
<template #cardContent>
<div :class="$style.tagInputTag">
<i18n-t keypath="testDefinition.edit.step.tag">
<template #tag>
<N8nTag :text="selectedTag.name" :clickable="true" @click="renameTag">
<template #tag>
{{ selectedTag.name }} <font-awesome-icon icon="pen" size="sm" />
</template>
</N8nTag>
</template>
</i18n-t>
</div>
<N8nButton
label="Select executions"
type="tertiary"
size="small"
@click="openExecutionsView"
/>
</template>
</EvaluationStep>
<div :class="$style.nestedSteps">
<BlockArrow class="mt-5xs mb-5xs" />
<div style="display: flex; flex-direction: column">
<BlockArrow class="mt-5xs mb-5xs ml-auto mr-2xl" />
<!-- Mocked Nodes -->
<EvaluationStep
:issues="getFieldIssues('mockedNodes')"
:tooltip="locale.baseText('testDefinition.edit.step.nodes.tooltip')"
:external-tooltip="!hasRuns"
>
<template #title>
{{
locale.baseText('testDefinition.edit.step.mockedNodes', {
adjustToNumber: mockedNodes?.length ?? 0,
})
}}
<N8nText>({{ locale.baseText('generic.optional') }})</N8nText>
</template>
<template #cardContent>
<N8nButton
size="small"
data-test-id="select-nodes-button"
:label="locale.baseText('testDefinition.edit.selectNodes')"
type="tertiary"
@click="$emit('openPinningModal')"
/>
</template>
</EvaluationStep>
<BlockArrow class="mt-5xs mb-5xs ml-auto mr-2xl" />
<!-- Re-run Executions -->
<EvaluationStep
:title="locale.baseText('testDefinition.edit.step.reRunExecutions')"
:tooltip="locale.baseText('testDefinition.edit.step.reRunExecutions.tooltip')"
:external-tooltip="!hasRuns"
/>
<BlockArrow class="mt-5xs mb-5xs ml-auto mr-2xl" />
</div>
</div>
<!-- Compare Executions -->
<EvaluationStep
:title="locale.baseText('testDefinition.edit.step.compareExecutions')"
:description="locale.baseText('testDefinition.edit.workflowSelectorLabel')"
:issues="getFieldIssues('evaluationWorkflow')"
:tooltip="locale.baseText('testDefinition.edit.step.compareExecutions.tooltip')"
:external-tooltip="!hasRuns"
>
<template #cardContent>
<WorkflowSelector
v-model="evaluationWorkflow"
:example-pinned-data="examplePinnedData"
:class="{ 'has-issues': getFieldIssues('evaluationWorkflow').length > 0 }"
:sample-workflow-name="sampleWorkflowName"
@workflow-created="$emit('evaluationWorkflowCreated', $event)"
/>
</template>
</EvaluationStep>
</div>
<Modal
width="calc(100% - (48px * 2))"
height="calc(100% - (48px * 2))"
:custom-class="$style.pinnigModal"
:name="NODE_PINNING_MODAL_KEY"
>
<template #header>
<N8nHeading tag="h3" size="xlarge" color="text-dark" class="mb-2xs">
{{ locale.baseText('testDefinition.edit.selectNodes') }}
</N8nHeading>
<N8nText color="text-base">
{{ locale.baseText('testDefinition.edit.modal.description') }}
</N8nText>
</template>
<template #content>
<NodesPinning v-model="mockedNodes" data-test-id="nodes-pinning-modal" />
</template>
</Modal>
</div>
</template>
<style module lang="scss">
.pinnigModal {
--dialog-max-width: none;
margin: 0;
}
.nestedSteps {
display: grid;
grid-template-columns: 20% 1fr;
}
.tagInputTag {
display: flex;
gap: var(--spacing-3xs);
font-size: var(--font-size-2xs);
color: var(--color-text-base);
margin-bottom: var(--spacing-xs);
}
</style>

View File

@@ -1,140 +0,0 @@
import { waitFor } from '@testing-library/vue';
import { createPinia, setActivePinia } from 'pinia';
import { createTestingPinia } from '@pinia/testing';
import NodesPinning from '../NodesPinning.vue';
import { createComponentRenderer } from '@/__tests__/render';
import { useWorkflowsStore } from '@/stores/workflows.store';
import { useNodeTypesStore } from '@/stores/nodeTypes.store';
import {
createTestNode,
createTestWorkflow,
createTestWorkflowObject,
mockNodeTypeDescription,
} from '@/__tests__/mocks';
import { mockedStore } from '@/__tests__/utils';
import { NodeConnectionTypes } from 'n8n-workflow';
import { SET_NODE_TYPE } from '@/constants';
vi.mock('vue-router', () => {
const push = vi.fn();
return {
useRouter: () => ({
push,
}),
useRoute: () => ({
params: {
name: 'test-workflow',
testId: 'test-123',
},
}),
RouterLink: {
template: '<a><slot /></a>',
},
};
});
const renderComponent = createComponentRenderer(NodesPinning, {
props: {
modelValue: [{ id: '1', name: 'Node 1' }],
},
global: {
plugins: [createTestingPinia()],
},
});
describe('NodesPinning', () => {
const workflowsStore = mockedStore(useWorkflowsStore);
const nodes = [
createTestNode({ id: '1', name: 'Node 1', type: SET_NODE_TYPE }),
createTestNode({ id: '2', name: 'Node 2', type: SET_NODE_TYPE }),
];
const nodeTypesStore = mockedStore(useNodeTypesStore);
const nodeTypeDescription = mockNodeTypeDescription({
name: SET_NODE_TYPE,
inputs: [NodeConnectionTypes.Main],
outputs: [NodeConnectionTypes.Main],
});
nodeTypesStore.nodeTypes = {
node: { 1: nodeTypeDescription },
};
nodeTypesStore.getNodeType = vi.fn().mockReturnValue(nodeTypeDescription);
const workflow = createTestWorkflow({
id: 'test-workflow',
name: 'Test Workflow',
nodes,
connections: {},
});
const workflowObject = createTestWorkflowObject(workflow);
workflowsStore.getWorkflowById = vi.fn().mockReturnValue(workflow);
workflowsStore.getCurrentWorkflow = vi.fn().mockReturnValue(workflowObject);
beforeEach(() => {
const pinia = createPinia();
setActivePinia(pinia);
nodeTypesStore.setNodeTypes([nodeTypeDescription]);
});
afterEach(() => {
vi.clearAllMocks();
});
it('should render workflow nodes', async () => {
const { container } = renderComponent();
await waitFor(() => {
expect(container.querySelectorAll('.vue-flow__node')).toHaveLength(2);
});
expect(container.querySelector('[data-node-name="Node 1"]')).toBeInTheDocument();
expect(container.querySelector('[data-node-name="Node 2"]')).toBeInTheDocument();
});
it('should update UI when pinning/unpinning nodes', async () => {
const { container, getAllByTestId } = renderComponent();
await waitFor(() => {
expect(container.querySelector('[data-node-name="Node 1"]')).toBeInTheDocument();
});
const buttons = getAllByTestId('node-pin-button');
expect(buttons.length).toBe(2);
expect(buttons[0]).toHaveTextContent('Unpin');
expect(buttons[1]).toHaveTextContent('Pin');
});
it('should emit update:modelValue when pinning nodes', async () => {
const { container, emitted, getAllByTestId } = renderComponent();
await waitFor(() => {
expect(container.querySelector('[data-node-name="Node 1"]')).toBeInTheDocument();
});
const pinButton = getAllByTestId('node-pin-button')[1];
pinButton?.click();
expect(emitted('update:modelValue')).toBeTruthy();
expect(emitted('update:modelValue')[0]).toEqual([
[
{ id: '1', name: 'Node 1' },
{ id: '2', name: 'Node 2' },
],
]);
});
it('should emit update:modelValue when unpinning nodes', async () => {
const { container, emitted, getAllByTestId } = renderComponent();
await waitFor(() => {
expect(container.querySelector('[data-node-name="Node 1"]')).toBeInTheDocument();
});
const pinButton = getAllByTestId('node-pin-button')[0];
pinButton?.click();
expect(emitted('update:modelValue')).toBeTruthy();
expect(emitted('update:modelValue')[0]).toEqual([[]]);
});
});

View File

@@ -1,119 +0,0 @@
<script setup lang="ts">
import { useI18n } from '@/composables/useI18n';
import { N8nBadge, N8nButton, N8nText } from '@n8n/design-system';
import { computed } from 'vue';
defineEmits<{ 'create-test': [] }>();
const locale = useI18n();
/**
* TODO: fully implement the logic here
*/
const canCreateEvaluations = computed(() => true);
const isRegisteredCommunity = computed(() => false);
const isNotRegisteredCommunity = computed(() => false);
const hasReachedLimit = computed(() => false);
</script>
<template>
<div :class="$style.container">
<div :class="{ [$style.card]: true, [$style.cardActive]: true }">
<N8nBadge theme="warning" size="small">New</N8nBadge>
<div :class="$style.cardContent">
<N8nText tag="h2" size="xlarge" color="text-base" class="mb-2xs">
{{ locale.baseText('testDefinition.list.evaluations') }}
</N8nText>
<N8nText tag="div" color="text-base" class="mb-s ml-s mr-s">
{{ locale.baseText('testDefinition.list.actionDescription') }}
</N8nText>
<template v-if="canCreateEvaluations">
<N8nButton @click="$emit('create-test')">
{{ locale.baseText('testDefinition.list.actionButton') }}
</N8nButton>
</template>
<template v-else-if="isRegisteredCommunity">
<N8nButton @click="$emit('create-test')">
{{ locale.baseText('testDefinition.list.actionButton') }}
</N8nButton>
<N8nText tag="div" color="text-light" size="small" class="mt-2xs">
{{ locale.baseText('testDefinition.list.actionDescription.registered') }}
</N8nText>
</template>
<template v-else-if="isNotRegisteredCommunity">
<div :class="$style.divider" class="mb-s"></div>
<N8nText tag="div" color="text-light" size="small" class="mb-s">
{{ locale.baseText('testDefinition.list.actionDescription.unregistered') }}
</N8nText>
<N8nButton>
{{ locale.baseText('testDefinition.list.actionButton.unregistered') }}
</N8nButton>
</template>
<template v-else-if="hasReachedLimit">
<div :class="$style.divider" class="mb-s"></div>
<N8nText tag="div" color="text-light" size="small" class="mb-s">
{{ locale.baseText('testDefinition.list.actionDescription.atLimit') }}
</N8nText>
<N8nButton>
{{ locale.baseText('generic.upgrade') }}
</N8nButton>
</template>
</div>
</div>
<div :class="{ [$style.card]: true, [$style.cardInActive]: true }">
<N8nBadge>
{{ locale.baseText('testDefinition.list.unitTests.badge') }}
</N8nBadge>
<div :class="$style.cardContent">
<N8nText tag="h2" size="xlarge" color="text-base" class="mb-2xs">
{{ locale.baseText('testDefinition.list.unitTests.title') }}
</N8nText>
<N8nText tag="div" color="text-base" class="mb-s">
{{ locale.baseText('testDefinition.list.unitTests.description') }}
</N8nText>
<N8nButton type="secondary">
{{ locale.baseText('testDefinition.list.unitTests.cta') }}
</N8nButton>
</div>
</div>
</div>
</template>
<style module lang="scss">
.container {
display: flex;
justify-content: center;
height: 100%;
align-items: center;
gap: 24px;
}
.card {
border-radius: var(--border-radius-base);
width: 280px;
height: 290px;
display: flex;
flex-direction: column;
align-items: center;
justify-content: space-between;
padding: 20px;
text-align: center;
}
.cardContent {
margin: auto;
}
.cardActive {
border: 1px solid var(--color-foreground-base);
background-color: var(--color-background-xlight);
}
.cardInActive {
border: 1px dashed var(--color-foreground-base);
}
.divider {
border-top: 1px solid var(--color-foreground-light);
}
</style>
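The four computed flags in this component are stubbed out (see the TODO above). A minimal sketch of how they might eventually be derived from the evaluation quota exposed on the frontend settings (settings.evaluation.quota, as read elsewhere in this change); the usedEvaluationsCount source below is a hypothetical placeholder:

import { computed } from 'vue';
import { useSettingsStore } from '@/stores/settings.store';

const settingsStore = useSettingsStore();

// Hypothetical: number of workflows that already have evaluation runs.
// The real value would come from the backend; hard-coded for illustration.
const usedEvaluationsCount = computed(() => 0);

// Assumed quota semantics: 0 = locked, -1 = unlimited, n > 0 = allowed workflows.
const quota = computed(() => settingsStore.settings.evaluation.quota);

const hasReachedLimit = computed(
  () => quota.value >= 0 && usedEvaluationsCount.value >= quota.value,
);
const canCreateEvaluations = computed(() => quota.value !== 0 && !hasReachedLimit.value);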

View File

@@ -1,180 +0,0 @@
<script setup lang="ts">
import type { TestRunRecord } from '@/api/testDefinition.ee';
import TimeAgo from '@/components/TimeAgo.vue';
import { useI18n } from '@/composables/useI18n';
import { N8nIcon, N8nText } from '@n8n/design-system';
import type { IconColor } from '@n8n/design-system/types/icon';
import { computed } from 'vue';
const props = defineProps<{
name: string;
testCases: number;
execution?: TestRunRecord;
errors?: Array<{ field: string; message: string }>;
}>();
const locale = useI18n();
type IconDefinition = { icon: string; color: IconColor; spin?: boolean };
const statusesColorDictionary: Record<TestRunRecord['status'], IconDefinition> = {
new: {
icon: 'circle',
color: 'foreground-dark',
},
running: {
icon: 'spinner',
color: 'secondary',
spin: true,
},
completed: {
icon: 'exclamation-circle',
color: 'success',
},
error: {
icon: 'exclamation-triangle',
color: 'danger',
},
cancelled: {
icon: 'minus-circle',
color: 'foreground-xdark',
},
warning: {
icon: 'exclamation-circle',
color: 'warning',
},
success: {
icon: 'circle-check',
color: 'success',
},
} as const;
const statusRender = computed<IconDefinition & { label: string }>(() => {
if (props.errors?.length) {
return {
icon: 'adjust',
color: 'foreground-dark',
label: 'Incomplete',
};
}
if (!props.execution) {
return {
icon: 'circle',
color: 'foreground-dark',
label: 'Never ran',
};
}
return {
...statusesColorDictionary[props.execution.status],
label: props.execution.status,
};
});
</script>
<template>
<div :class="$style.testCard">
<div :class="$style.testCardContent">
<div>
<N8nText bold tag="div" :class="$style.name">{{ name }}</N8nText>
<N8nText tag="div" color="text-base" size="small">
{{
locale.baseText('testDefinition.list.item.tests', {
adjustToNumber: testCases,
})
}}
</N8nText>
</div>
<div>
<div :class="$style.status">
<N8nIcon v-bind="statusRender" size="small" />
<N8nText size="small" color="text-base">
{{ statusRender.label }}
</N8nText>
</div>
<N8nText v-if="errors?.length" tag="div" color="text-base" size="small" class="ml-m">
{{
locale.baseText('testDefinition.list.item.missingFields', {
adjustToNumber: errors.length,
})
}}
</N8nText>
<N8nText v-else-if="execution" tag="div" color="text-base" size="small" class="ml-m">
<TimeAgo :date="execution.updatedAt" />
</N8nText>
</div>
<div :class="$style.metrics">
<template v-if="execution?.metrics">
<template v-for="[key, value] in Object.entries(execution.metrics)" :key>
<N8nText
color="text-base"
size="small"
style="overflow: hidden; text-overflow: ellipsis"
>
{{ key }}
</N8nText>
<N8nText color="text-base" size="small" bold>
{{ Math.round((value + Number.EPSILON) * 100) / 100 }}
</N8nText>
</template>
</template>
</div>
</div>
<slot name="prepend"></slot>
<slot name="append"></slot>
</div>
</template>
<style module lang="scss">
.testCard {
display: flex;
align-items: center;
background-color: var(--color-background-xlight);
padding: var(--spacing-xs) 20px var(--spacing-xs) var(--spacing-m);
gap: var(--spacing-s);
border-bottom: 1px solid var(--color-foreground-base);
cursor: pointer;
&:first-child {
border-top-left-radius: inherit;
border-top-right-radius: inherit;
}
&:last-child {
border-bottom-color: transparent;
border-bottom-left-radius: inherit;
border-bottom-right-radius: inherit;
}
&:hover {
background-color: var(--color-background-light);
.name {
color: var(--color-primary);
}
}
}
.status {
display: inline-flex;
gap: 8px;
text-transform: capitalize;
align-items: center;
}
.testCardContent {
display: grid;
grid-template-columns: 2fr 1fr 1fr;
align-items: center;
flex: 1;
gap: var(--spacing-xs);
}
.metrics {
display: grid;
grid-template-columns: 120px 1fr;
column-gap: 18px;
}
</style>
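A note on the metric formatting above: Math.round((value + Number.EPSILON) * 100) / 100 rounds to two decimals, and the Number.EPSILON nudge compensates for binary floating point, where a value such as 1.005 is stored slightly below 1.005 and would otherwise round down. The same idea as a standalone helper:

// Round a metric to two decimal places, compensating for float representation.
const roundMetric = (value: number): number =>
  Math.round((value + Number.EPSILON) * 100) / 100;

roundMetric(0.755555); // 0.76
roundMetric(1.005); // 1.01 — plain Math.round(1.005 * 100) / 100 would yield 1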

View File

@@ -1,109 +0,0 @@
<script setup lang="ts">
import type { TestRunRecord } from '@/api/testDefinition.ee';
import { useI18n } from '@/composables/useI18n';
import AnimatedSpinner from '@/components/AnimatedSpinner.vue'; // path assumed; component is used in the template below
import { N8nHeading, N8nIcon, N8nText } from '@n8n/design-system';
import type { IconColor } from '@n8n/design-system/types/icon';
import { computed } from 'vue';
import type { TestTableColumn } from '../shared/TestTableBase.vue';
import TestTableBase from '../shared/TestTableBase.vue';
const emit = defineEmits<{
rowClick: [run: TestRunRecord & { index: number }];
}>();
const props = defineProps<{
runs: Array<TestRunRecord & { index: number }>;
columns: Array<TestTableColumn<TestRunRecord & { index: number }>>;
}>();
const statusDictionary: Record<TestRunRecord['status'], { icon: string; color: IconColor }> = {
new: {
icon: 'status-new',
color: 'foreground-xdark',
},
running: {
icon: 'spinner',
color: 'secondary',
},
completed: {
icon: 'status-completed',
color: 'success',
},
error: {
icon: 'status-error',
color: 'danger',
},
cancelled: {
icon: 'status-canceled',
color: 'foreground-xdark',
},
warning: {
icon: 'status-warning',
color: 'warning',
},
success: {
icon: 'status-completed',
color: 'success',
},
};
const locale = useI18n();
// Combine test run statuses and finalResult to get the final status
const runSummaries = computed(() => {
return props.runs.map(({ status, finalResult, ...run }) => {
if (status === 'completed' && finalResult) {
return { ...run, status: finalResult };
}
return { ...run, status };
});
});
</script>
<template>
<div :class="$style.container">
<N8nHeading size="large" :bold="true" :class="$style.runsTableHeading" color="text-base">
{{ locale.baseText('testDefinition.edit.pastRuns.total', { adjustToNumber: runs.length }) }}
<N8nText> ({{ runs.length }}) </N8nText>
</N8nHeading>
<TestTableBase
:data="runSummaries"
:columns="columns"
:default-sort="{ prop: 'runAt', order: 'descending' }"
@row-click="(row) => emit('rowClick', row)"
>
<template #id="{ row }">#{{ row.index }} </template>
<template #status="{ row }">
<div
style="display: inline-flex; gap: 8px; text-transform: capitalize; align-items: center"
>
<N8nText v-if="row.status === 'running'" color="secondary" class="mr-2xs">
<AnimatedSpinner />
</N8nText>
<N8nIcon
v-else
:icon="statusDictionary[row.status].icon"
:color="statusDictionary[row.status].color"
class="mr-2xs"
/>
<template v-if="row.status === 'error'">
{{ row.status }}
</template>
<template v-else>
{{ row.status }}
</template>
</div>
</template>
</TestTableBase>
</div>
</template>
<style module lang="scss">
.container {
display: flex;
flex-direction: column;
gap: 8px;
}
</style>
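The runSummaries computed above folds finalResult into the displayed status for completed runs; expressed as a pure function (a minimal illustration, not part of the component):

type Status = 'new' | 'running' | 'completed' | 'error' | 'cancelled' | 'success' | 'warning';

// A completed run surfaces its finalResult ('success' | 'error' | 'warning');
// any other run keeps its raw status.
const displayStatus = (status: Status, finalResult?: Status): Status =>
  status === 'completed' && finalResult ? finalResult : status;

displayStatus('completed', 'warning'); // 'warning'
displayStatus('running'); // 'running'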

View File

@@ -1,206 +0,0 @@
import { ref, computed } from 'vue';
import type { ComponentPublicInstance, ComputedRef } from 'vue';
import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee';
import type AnnotationTagsDropdownEe from '@/components/AnnotationTagsDropdown.ee.vue';
import type { N8nInput } from '@n8n/design-system';
import type { UpdateTestDefinitionParams } from '@/api/testDefinition.ee';
import type { EditableField, EditableFormState, EvaluationFormState } from '../types';
type FormRefs = {
nameInput: ComponentPublicInstance<typeof N8nInput>;
tagsInput: ComponentPublicInstance<typeof AnnotationTagsDropdownEe>;
};
export function useTestDefinitionForm() {
const evaluationsStore = useTestDefinitionStore();
// State initialization
const state = ref<EvaluationFormState>({
name: {
value: `My Test ${evaluationsStore.allTestDefinitions.length + 1}`,
tempValue: '',
isEditing: false,
},
tags: {
value: [],
tempValue: [],
isEditing: false,
},
description: {
value: '',
tempValue: '',
isEditing: false,
},
evaluationWorkflow: {
mode: 'list',
value: '',
__rl: true,
},
mockedNodes: [],
});
const isSaving = ref(false);
const fields = ref<FormRefs>({} as FormRefs);
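// Editable subset of the form state; each field tracks value, tempValue and isEditing.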
const editableFields: ComputedRef<{
name: EditableField<string>;
tags: EditableField<string[]>;
description: EditableField<string>;
}> = computed(() => ({
name: state.value.name,
tags: state.value.tags,
description: state.value.description,
}));
/**
* Load test data including metrics.
*/
const loadTestData = async (testId: string, workflowId: string) => {
try {
await evaluationsStore.fetchAll({ force: true, workflowId });
const testDefinition = evaluationsStore.testDefinitionsById[testId];
if (testDefinition) {
state.value.description = {
value: testDefinition.description ?? '',
isEditing: false,
tempValue: '',
};
state.value.name = {
value: testDefinition.name ?? '',
isEditing: false,
tempValue: '',
};
state.value.tags = {
isEditing: false,
value: testDefinition.annotationTagId ? [testDefinition.annotationTagId] : [],
tempValue: [],
};
state.value.evaluationWorkflow = {
mode: 'list',
value: testDefinition.evaluationWorkflowId ?? '',
__rl: true,
};
state.value.mockedNodes = testDefinition.mockedNodes ?? [];
evaluationsStore.updateRunFieldIssues(testDefinition.id);
}
} catch (error) {
console.error('Failed to load test data', error);
}
};
const createTest = async (workflowId: string) => {
if (isSaving.value) return;
isSaving.value = true;
try {
const params = {
name: state.value.name.value,
workflowId,
description: state.value.description.value,
};
return await evaluationsStore.create(params);
} finally {
isSaving.value = false;
}
};
const updateTest = async (testId: string) => {
if (isSaving.value) return;
isSaving.value = true;
try {
if (!testId) {
throw new Error('Test ID is required for updating a test');
}
const params: UpdateTestDefinitionParams = {
name: state.value.name.value,
description: state.value.description.value,
};
if (state.value.evaluationWorkflow.value) {
params.evaluationWorkflowId = state.value.evaluationWorkflow.value.toString();
}
const annotationTagId = state.value.tags.value[0];
if (annotationTagId) {
params.annotationTagId = annotationTagId;
}
params.mockedNodes = state.value.mockedNodes;
const response = await evaluationsStore.update({ ...params, id: testId });
return response;
} finally {
isSaving.value = false;
}
};
/**
* Start editing an editable field by copying `value` to `tempValue`.
*/
function startEditing<T extends keyof EditableFormState>(field: T) {
const fieldObj = editableFields.value[field];
if (fieldObj.isEditing) {
// Already editing, do nothing
return;
}
if (Array.isArray(fieldObj.value)) {
fieldObj.tempValue = [...fieldObj.value];
} else {
fieldObj.tempValue = fieldObj.value;
}
fieldObj.isEditing = true;
}
/**
* Save changes by copying `tempValue` back into `value`.
*/
function saveChanges<T extends keyof EditableFormState>(field: T) {
const fieldObj = editableFields.value[field];
fieldObj.value = Array.isArray(fieldObj.tempValue)
? [...fieldObj.tempValue]
: fieldObj.tempValue;
fieldObj.isEditing = false;
}
/**
* Cancel editing and revert `tempValue` from `value`.
*/
function cancelEditing<T extends keyof EditableFormState>(field: T) {
const fieldObj = editableFields.value[field];
if (Array.isArray(fieldObj.value)) {
fieldObj.tempValue = [...fieldObj.value];
} else {
fieldObj.tempValue = fieldObj.value;
}
fieldObj.isEditing = false;
}
/**
* Handle keyboard events during editing.
*/
function handleKeydown<T extends keyof EditableFormState>(event: KeyboardEvent, field: T) {
if (event.key === 'Escape') {
cancelEditing(field);
} else if (event.key === 'Enter' && !event.shiftKey) {
event.preventDefault();
saveChanges(field);
}
}
return {
state,
fields,
isSaving: computed(() => isSaving.value),
loadTestData,
createTest,
updateTest,
startEditing,
saveChanges,
cancelEditing,
handleKeydown,
};
}
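A hedged sketch of how a view could consume this composable (the import path and route param names are assumptions based on the routes in this change):

import { onMounted } from 'vue';
import { useRoute } from 'vue-router';
import { useTestDefinitionForm } from './composables/useTestDefinitionForm';

const route = useRoute();
// `state` drives the template bindings.
const { state, isSaving, loadTestData, updateTest } = useTestDefinitionForm();

onMounted(async () => {
  // ':name' carries the workflow id, ':testId' the test id.
  await loadTestData(route.params.testId as string, route.params.name as string);
});

async function onSave() {
  if (!isSaving.value) await updateTest(route.params.testId as string);
}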

View File

@@ -1,262 +0,0 @@
import { setActivePinia } from 'pinia';
import { createTestingPinia } from '@pinia/testing';
import { useTestDefinitionForm } from '../composables/useTestDefinitionForm';
import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee';
import { mockedStore } from '@/__tests__/utils';
import type { TestDefinitionRecord } from '@/api/testDefinition.ee';
const TEST_DEF_A: TestDefinitionRecord = {
id: '1',
name: 'Test Definition A',
description: 'Description A',
evaluationWorkflowId: '456',
workflowId: '123',
annotationTagId: '789',
annotationTag: null,
createdAt: '2023-01-01T00:00:00.000Z',
};
const TEST_DEF_B: TestDefinitionRecord = {
id: '2',
name: 'Test Definition B',
workflowId: '123',
description: 'Description B',
annotationTag: null,
createdAt: '2023-01-01T00:00:00.000Z',
};
const TEST_DEF_NEW: TestDefinitionRecord = {
id: '3',
workflowId: '123',
name: 'New Test Definition',
description: 'New Description',
annotationTag: null,
createdAt: '2023-01-01T00:00:00.000Z',
};
beforeEach(() => {
const pinia = createTestingPinia();
setActivePinia(pinia);
});
afterEach(() => {
vi.clearAllMocks();
});
describe('useTestDefinitionForm', () => {
it('should initialize with default props', () => {
const { state } = useTestDefinitionForm();
expect(state.value.description.value).toBe('');
expect(state.value.name.value).toContain('My Test');
expect(state.value.tags.value).toEqual([]);
expect(state.value.evaluationWorkflow.value).toBe('');
});
it('should load test data', async () => {
const { loadTestData, state } = useTestDefinitionForm();
const fetchSpy = vi.spyOn(useTestDefinitionStore(), 'fetchAll');
const evaluationsStore = mockedStore(useTestDefinitionStore);
evaluationsStore.testDefinitionsById = {
[TEST_DEF_A.id]: TEST_DEF_A,
[TEST_DEF_B.id]: TEST_DEF_B,
};
await loadTestData(TEST_DEF_A.id, '123');
expect(fetchSpy).toBeCalled();
expect(state.value.name.value).toEqual(TEST_DEF_A.name);
expect(state.value.description.value).toEqual(TEST_DEF_A.description);
expect(state.value.tags.value).toEqual([TEST_DEF_A.annotationTagId]);
expect(state.value.evaluationWorkflow.value).toEqual(TEST_DEF_A.evaluationWorkflowId);
});
it('should gracefully handle loadTestData when no test definition found', async () => {
const { loadTestData, state } = useTestDefinitionForm();
const fetchSpy = vi.spyOn(useTestDefinitionStore(), 'fetchAll');
const evaluationsStore = mockedStore(useTestDefinitionStore);
evaluationsStore.testDefinitionsById = {};
await loadTestData('unknown-id', '123');
expect(fetchSpy).toBeCalled();
// Should remain unchanged since no definition found
expect(state.value.description.value).toBe('');
expect(state.value.name.value).toContain('My Test');
expect(state.value.tags.value).toEqual([]);
});
it('should handle errors while loading test data', async () => {
const { loadTestData } = useTestDefinitionForm();
const fetchSpy = vi
.spyOn(useTestDefinitionStore(), 'fetchAll')
.mockRejectedValue(new Error('Fetch Failed'));
const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
await loadTestData(TEST_DEF_A.id, '123');
expect(fetchSpy).toBeCalled();
expect(consoleErrorSpy).toBeCalledWith('Failed to load test data', expect.any(Error));
consoleErrorSpy.mockRestore();
});
it('should save a new test', async () => {
const { createTest, state } = useTestDefinitionForm();
const createSpy = vi.spyOn(useTestDefinitionStore(), 'create').mockResolvedValue(TEST_DEF_NEW);
state.value.name.value = TEST_DEF_NEW.name;
state.value.description.value = TEST_DEF_NEW.description ?? '';
const newTest = await createTest('123');
expect(createSpy).toBeCalledWith({
name: TEST_DEF_NEW.name,
description: TEST_DEF_NEW.description,
workflowId: '123',
});
expect(newTest).toEqual(TEST_DEF_NEW);
});
it('should handle errors when creating a new test', async () => {
const { createTest } = useTestDefinitionForm();
const createSpy = vi
.spyOn(useTestDefinitionStore(), 'create')
.mockRejectedValue(new Error('Create Failed'));
await expect(createTest('123')).rejects.toThrow('Create Failed');
expect(createSpy).toBeCalled();
});
it('should update an existing test', async () => {
const { updateTest, state } = useTestDefinitionForm();
const updatedBTest = {
...TEST_DEF_B,
updatedAt: '2022-01-01T00:00:00.000Z',
createdAt: '2022-01-01T00:00:00.000Z',
};
const updateSpy = vi.spyOn(useTestDefinitionStore(), 'update').mockResolvedValue(updatedBTest);
state.value.name.value = TEST_DEF_B.name;
state.value.description.value = TEST_DEF_B.description ?? '';
const updatedTest = await updateTest(TEST_DEF_A.id);
expect(updateSpy).toBeCalledWith({
id: TEST_DEF_A.id,
name: TEST_DEF_B.name,
description: TEST_DEF_B.description,
mockedNodes: [],
});
expect(updatedTest).toEqual(updatedBTest);
});
it('should throw an error if no testId is provided when updating a test', async () => {
const { updateTest } = useTestDefinitionForm();
await expect(updateTest('')).rejects.toThrow('Test ID is required for updating a test');
});
it('should handle errors when updating a test', async () => {
const { updateTest, state } = useTestDefinitionForm();
const updateSpy = vi
.spyOn(useTestDefinitionStore(), 'update')
.mockRejectedValue(new Error('Update Failed'));
state.value.name.value = 'Test';
state.value.description.value = 'Some description';
await expect(updateTest(TEST_DEF_A.id)).rejects.toThrow('Update Failed');
expect(updateSpy).toBeCalled();
});
it('should start editing a field', () => {
const { state, startEditing } = useTestDefinitionForm();
startEditing('name');
expect(state.value.name.isEditing).toBe(true);
expect(state.value.name.tempValue).toBe(state.value.name.value);
startEditing('tags');
expect(state.value.tags.isEditing).toBe(true);
expect(state.value.tags.tempValue).toEqual(state.value.tags.value);
});
it('should do nothing if startEditing is called while already editing', () => {
const { state, startEditing } = useTestDefinitionForm();
state.value.name.isEditing = true;
state.value.name.tempValue = 'Original Name';
startEditing('name');
// Should remain unchanged because it was already editing
expect(state.value.name.isEditing).toBe(true);
expect(state.value.name.tempValue).toBe('Original Name');
});
it('should save changes to a field', () => {
const { state, startEditing, saveChanges } = useTestDefinitionForm();
// Name
startEditing('name');
state.value.name.tempValue = 'New Name';
saveChanges('name');
expect(state.value.name.isEditing).toBe(false);
expect(state.value.name.value).toBe('New Name');
// Tags
startEditing('tags');
state.value.tags.tempValue = ['123'];
saveChanges('tags');
expect(state.value.tags.isEditing).toBe(false);
expect(state.value.tags.value).toEqual(['123']);
});
it('should cancel editing a field', () => {
const { state, startEditing, cancelEditing } = useTestDefinitionForm();
const originalName = state.value.name.value;
startEditing('name');
state.value.name.tempValue = 'New Name';
cancelEditing('name');
expect(state.value.name.isEditing).toBe(false);
expect(state.value.name.tempValue).toBe(originalName);
const originalTags = [...state.value.tags.value];
startEditing('tags');
state.value.tags.tempValue = ['123'];
cancelEditing('tags');
expect(state.value.tags.isEditing).toBe(false);
expect(state.value.tags.tempValue).toEqual(originalTags);
});
it('should handle keydown - Escape', () => {
const { state, startEditing, handleKeydown } = useTestDefinitionForm();
startEditing('name');
handleKeydown(new KeyboardEvent('keydown', { key: 'Escape' }), 'name');
expect(state.value.name.isEditing).toBe(false);
startEditing('tags');
handleKeydown(new KeyboardEvent('keydown', { key: 'Escape' }), 'tags');
expect(state.value.tags.isEditing).toBe(false);
});
it('should handle keydown - Enter', () => {
const { state, startEditing, handleKeydown } = useTestDefinitionForm();
startEditing('name');
state.value.name.tempValue = 'New Name';
handleKeydown(new KeyboardEvent('keydown', { key: 'Enter' }), 'name');
expect(state.value.name.isEditing).toBe(false);
expect(state.value.name.value).toBe('New Name');
startEditing('tags');
state.value.tags.tempValue = ['123'];
handleKeydown(new KeyboardEvent('keydown', { key: 'Enter' }), 'tags');
expect(state.value.tags.isEditing).toBe(false);
expect(state.value.tags.value).toEqual(['123']);
});
it('should not save changes when shift+Enter is pressed', () => {
const { state, startEditing, handleKeydown } = useTestDefinitionForm();
startEditing('name');
state.value.name.tempValue = 'New Name With Shift';
handleKeydown(new KeyboardEvent('keydown', { key: 'Enter', shiftKey: true }), 'name');
expect(state.value.name.isEditing).toBe(true);
expect(state.value.name.value).not.toBe('New Name With Shift');
});
});

View File

@@ -176,7 +176,7 @@ function onRetryMenuItemSelect(action: string): void {
<template #content>
<span>{{ locale.baseText('executionsList.evaluation') }}</span>
</template>
<FontAwesomeIcon :class="[$style.icon, $style.evaluation]" icon="tasks" />
<FontAwesomeIcon :class="[$style.icon, $style.evaluation]" icon="check-double" />
</N8nTooltip>
</div>
</router-link>

View File

@@ -18,7 +18,7 @@ import { parse } from 'flatted';
import { useToast } from '@/composables/useToast';
import type { useRouter } from 'vue-router';
import { useI18n } from '@/composables/useI18n';
import { TelemetryHelpers } from 'n8n-workflow';
import { TelemetryHelpers, EVALUATION_TRIGGER_NODE_TYPE } from 'n8n-workflow';
import type { IWorkflowBase, ExpressionError, IDataObject, IRunExecutionData } from 'n8n-workflow';
import { codeNodeEditorEventBus, globalLinkActionsEventBus } from '@/event-bus';
import { getTriggerNodeServiceName } from '@/utils/nodeTypesUtils';
@@ -94,6 +94,34 @@ export async function executionFinished(
}
}
// Implicit looping: after a successful full run (no destination node set), re-trigger the evaluation trigger, if present, while its dataset still has rows left.
if (execution.status === 'success' && execution.data?.startData?.destinationNode === undefined) {
// check if we have an evaluation trigger in our workflow and whether it has any run data
const evalTrigger = execution.workflowData.nodes.find(
(node) => node.type === EVALUATION_TRIGGER_NODE_TYPE,
);
const triggerRunData = evalTrigger
? execution?.data?.resultData?.runData[evalTrigger.name]
: undefined;
if (evalTrigger && triggerRunData !== undefined) {
const mainData = triggerRunData[0]?.data?.main[0];
const rowsLeft = mainData ? (mainData[0]?.json?._rowsLeft as number) : 0;
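// _rowsLeft comes from the evaluation trigger's output item and reports how many dataset rows are still unprocessed.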
if (rowsLeft > 0) {
// Find the button that belongs to the evaluation trigger, and click it.
const testId = `execute-workflow-button-${evalTrigger.name}`;
setTimeout(() => {
const button = Array.from(document.querySelectorAll('[data-test-id]')).find((x) =>
(x as HTMLElement)?.dataset?.testId?.startsWith(testId),
);
(button as HTMLElement)?.click();
}, 2);
}
}
}
const runExecutionData = getRunExecutionData(execution);
uiStore.setProcessingExecutionResults(false);

View File

@@ -217,7 +217,6 @@ export const SLACK_TRIGGER_NODE_TYPE = 'n8n-nodes-base.slackTrigger';
export const TELEGRAM_TRIGGER_NODE_TYPE = 'n8n-nodes-base.telegramTrigger';
export const FACEBOOK_LEAD_ADS_TRIGGER_NODE_TYPE = 'n8n-nodes-base.facebookLeadAdsTrigger';
export const RESPOND_TO_WEBHOOK_NODE_TYPE = 'n8n-nodes-base.respondToWebhook';
export const EVALUATION_TRIGGER_NODE_TYPE = 'n8n-nodes-base.evaluationTrigger';
export const CREDENTIAL_ONLY_NODE_PREFIX = 'n8n-creds-base';
export const CREDENTIAL_ONLY_HTTP_NODE_VERSION = 4.1;
@@ -279,6 +278,8 @@ export const NODE_CREATOR_OPEN_SOURCES: Record<
NODE_CONNECTION_DROP: 'node_connection_drop',
NOTICE_ERROR_MESSAGE: 'notice_error_message',
CONTEXT_MENU: 'context_menu',
ADD_EVALUATION_NODE_BUTTON: 'add_evaluation_node_button',
ADD_EVALUATION_TRIGGER_BUTTON: 'add_evaluation_trigger_button',
'': '',
};
export const CORE_NODES_CATEGORY = 'Core Nodes';
@@ -310,6 +311,7 @@ export const AI_CATEGORY_TEXT_SPLITTERS = 'Text Splitters';
export const AI_CATEGORY_OTHER_TOOLS = 'Other Tools';
export const AI_CATEGORY_ROOT_NODES = 'Root Nodes';
export const AI_CATEGORY_MCP_NODES = 'Model Context Protocol';
export const AI_EVALUATION = 'Evaluation';
export const AI_UNCATEGORIZED_CATEGORY = 'Miscellaneous';
export const AI_CODE_TOOL_LANGCHAIN_NODE_TYPE = '@n8n/n8n-nodes-langchain.toolCode';
export const AI_WORKFLOW_TOOL_LANGCHAIN_NODE_TYPE = '@n8n/n8n-nodes-langchain.toolWorkflow';
@@ -547,11 +549,9 @@ export const enum VIEWS {
COMMUNITY_NODES = 'CommunityNodes',
WORKFLOWS = 'WorkflowsView',
WORKFLOW_EXECUTIONS = 'WorkflowExecutions',
TEST_DEFINITION = 'TestDefinition',
TEST_DEFINITION_EDIT = 'TestDefinitionEdit',
TEST_DEFINITION_RUNS_COMPARE = 'TestDefinitionRunsCompare',
TEST_DEFINITION_RUNS_DETAIL = 'TestDefinitionRunsDetail',
NEW_TEST_DEFINITION = 'NewTestDefinition',
EVALUATION = 'Evaluation',
EVALUATION_EDIT = 'EvaluationEdit',
EVALUATION_RUNS_DETAIL = 'EvaluationRunsDetail',
USAGE = 'Usage',
LOG_STREAMING_SETTINGS = 'LogStreamingSettingsView',
SSO_SETTINGS = 'SSoSettings',
@@ -659,7 +659,7 @@ export const enum MAIN_HEADER_TABS {
WORKFLOW = 'workflow',
EXECUTIONS = 'executions',
SETTINGS = 'settings',
TEST_DEFINITION = 'testDefinition',
EVALUATION = 'evaluation',
}
export const CURL_IMPORT_NOT_SUPPORTED_PROTOCOLS = [
'ftp',
@@ -717,12 +717,6 @@ export const KEEP_AUTH_IN_NDV_FOR_NODES = [
export const MAIN_AUTH_FIELD_NAME = 'authentication';
export const NODE_RESOURCE_FIELD_NAME = 'resource';
export const EVALUATION_TRIGGER = {
name: '031-evaluation-trigger',
control: 'control',
variant: 'variant',
};
export const EASY_AI_WORKFLOW_EXPERIMENT = {
name: '026_easy_ai_workflow',
control: 'control',

View File

@@ -58,7 +58,7 @@
"generic.executions": "Executions",
"generic.tag_plural": "Tags",
"generic.tag": "Tag | {count} Tags",
"generic.tests": "Tests",
"generic.tests": "Evaluations",
"generic.optional": "optional",
"generic.or": "or",
"generic.clickToCopy": "Click to copy",
@@ -2994,142 +2994,115 @@
"communityPlusModal.notice": "Included features may change, but once unlocked, you'll keep them forever.",
"executeWorkflowTrigger.createNewSubworkflow": "Create a Sub-Workflow in {projectName}",
"executeWorkflowTrigger.createNewSubworkflow.noProject": "Create a New Sub-Workflow",
"testDefinition.edit.descriptionPlaceholder": "Enter test description",
"testDefinition.edit.showConfig": "Show config",
"testDefinition.edit.hideConfig": "Hide config",
"testDefinition.edit.backButtonTitle": "Back to Workflow Evaluation",
"testDefinition.edit.namePlaceholder": "Enter test name",
"testDefinition.edit.selectTag": "Select tag...",
"testDefinition.edit.tagsHelpText": "Executions with this tag will be added as test cases to this test.",
"testDefinition.edit.workflowSelectorLabel": "Use a second workflow to make the comparison",
"testDefinition.edit.workflowSelectorDisplayName": "Workflow",
"testDefinition.edit.workflowSelectorTitle": "Use a second workflow to make the comparison",
"testDefinition.edit.workflowSelectorHelpText": "This workflow will be called once for each test case.",
"testDefinition.edit.updateTest": "Update test",
"testDefinition.edit.saveTest": "Save test",
"testDefinition.edit.runTest": "Run test",
"testDefinition.edit.testSaved": "Test saved",
"testDefinition.edit.testSaveFailed": "Failed to save test",
"testDefinition.edit.description": "Description",
"testDefinition.edit.description.description": "Add details about what this test evaluates and what success looks like",
"testDefinition.edit.pinNodes.noNodes.title": "No nodes to pin",
"testDefinition.edit.pinNodes.noNodes.description": "Your workflow needs to have at least one node to run a test",
"testDefinition.edit.tagName": "Tag name",
"testDefinition.edit.step.intro": "When running a test",
"testDefinition.edit.step.executions": "1. Fetch benchmark executions | 1. Fetch {count} benchmark execution | 1. Fetch {count} benchmark executions",
"testDefinition.edit.step.tag": "Any past executions tagged {tag} are fetched",
"testDefinition.edit.step.tag.placeholder": "Enter new tag name",
"testDefinition.edit.step.tag.validation.required": "Tag name is required",
"testDefinition.edit.step.tag.validation.tooLong": "Tag name is too long",
"testDefinition.edit.step.executions.tooltip": "Past executions are used as benchmark data. Each one will be re-executed during the test to check whether performance has changed.",
"testDefinition.edit.step.mockedNodes": "2. Mock nodes |2. Mock {count} node |2. Mock {count} nodes",
"testDefinition.edit.step.nodes.tooltip": "Mocked nodes have their data replayed rather than being re-executed. Do this to avoid calling external services, or save time executing.",
"testDefinition.edit.step.reRunExecutions": "3. Re-run executions",
"testDefinition.edit.step.reRunExecutions.tooltip": "Each past execution is re-run using the latest version of the workflow being tested",
"testDefinition.edit.step.compareExecutions": "4. Compare each past and new execution",
"testDefinition.edit.step.compareExecutions.tooltip": "Each past execution is compared with its new equivalent to check how similar they are. This is done using a separate evaluation workflow: it receives the two execution versions as input, and outputs metrics.",
"testDefinition.edit.step.collapse": "Collapse",
"testDefinition.edit.step.configure": "Configure",
"testDefinition.edit.selectNodes": "Pin nodes to mock them",
"testDefinition.edit.modal.description": "Choose which past data to keep when re-running the execution(s). Any mocked node will be replayed rather than re-executed. The trigger is always mocked.",
"testDefinition.edit.runExecution": "Run execution",
"testDefinition.edit.pastRuns": "Past runs",
"testDefinition.edit.pastRuns.total": "No runs | Past run | Past runs",
"testDefinition.edit.nodesPinning.pinButtonTooltip": "Use benchmark data for this node during evaluation execution",
"testDefinition.edit.nodesPinning.pinButtonTooltip.pinned": "This node will not be re-executed",
"testDefinition.edit.nodesPinning.triggerTooltip": "Trigger nodes are mocked by default",
"testDefinition.edit.saving": "Saving...",
"testDefinition.edit.saved": "Test saved",
"testDefinition.list.testDeleted": "Test deleted",
"testDefinition.list.tests": "Tests",
"testDefinition.list.evaluations": "Evaluation",
"testDefinition.list.unitTests.badge": "Coming soon",
"testDefinition.list.unitTests.title": "Unit test",
"testDefinition.list.unitTests.description": "Validate workflow logic by checking for specific conditions",
"testDefinition.list.unitTests.cta": "Register interest",
"testDefinition.list.createNew": "Create new evaluation",
"testDefinition.list.runAll": "Run all evaluations",
"testDefinition.list.actionDescription": "Measure changes in output by comparing results over time (for AI workflows)",
"testDefinition.list.actionButton": "Create an Evaluation",
"testDefinition.list.actionButton.unregistered": "Unlock evaluation",
"testDefinition.list.actionDescription.registered": "Your plan allows one evaluation",
"testDefinition.list.actionDescription.unregistered": "Unlock a free test when you register",
"testDefinition.list.actionDescription.atLimit": "You've reached your evaluation limit, upgrade to add more",
"testDefinition.list.testRuns": "No test runs | {count} test run | {count} test runs",
"testDefinition.list.lastRun": "Ran",
"testDefinition.list.running": "Running",
"testDefinition.list.errorRate": "Error rate: {errorRate}",
"testDefinition.list.testStartError": "Failed to start test run",
"testDefinition.list.testStarted": "Test run started",
"testDefinition.list.testCancelled": "Test run cancelled",
"testDefinition.list.loadError": "Failed to load tests",
"testDefinition.list.item.tests": "No test cases | {count} test case | {count} test cases",
"testDefinition.list.item.missingFields": "No fields missing | {count} field missing| {count} fields missing",
"testDefinition.listRuns.status.new": "New",
"testDefinition.listRuns.status.running": "Running",
"testDefinition.listRuns.status.evaluating": "Evaluating",
"testDefinition.listRuns.status.completed": "Completed",
"testDefinition.listRuns.status.cancelled": "Cancelled",
"testDefinition.listRuns.status.error": "Error",
"testDefinition.listRuns.status.success": "Success",
"testDefinition.listRuns.status.warning": "Warning",
"testDefinition.listRuns.metricsOverTime": "Metrics over time",
"testDefinition.listRuns.status": "Status",
"testDefinition.listRuns.runNumber": "Run",
"testDefinition.listRuns.runDate": "Run date",
"testDefinition.listRuns.runStatus": "Run status",
"testDefinition.listRuns.noRuns": "No test runs",
"testDefinition.listRuns.noRuns.description": "Run a test to see the results here",
"testDefinition.listRuns.deleteRuns": "No runs to delete | Delete {count} run | Delete {count} runs",
"testDefinition.listRuns.noRuns.button": "Run Test",
"testDefinition.listRuns.error.noPastExecutions": "No executions added to the specified tag",
"testDefinition.listRuns.error.evaluationWorkflowNotFound": "Selected evaluation workflow does not exist. {link}.",
"testDefinition.listRuns.error.evaluationWorkflowNotFound.solution": "Fix test configuration",
"testDefinition.runDetail.ranAt": "Ran at",
"testDefinition.runDetail.testCase": "Test case",
"testDefinition.runDetail.testCase.id": "Test case ID",
"testDefinition.runDetail.testCase.status": "Test case status",
"testDefinition.runDetail.totalCases": "Total cases",
"testDefinition.runDetail.error.mockedNodeMissing": "Output for a mocked node does not exist in benchmark execution.{link}.",
"testDefinition.runDetail.error.mockedNodeMissing.solution": "Fix test configuration",
"testDefinition.runDetail.error.executionFailed": "Failed to execute workflow with benchmark trigger. {link}.",
"testDefinition.runDetail.error.executionFailed.solution": "View execution",
"testDefinition.runDetail.error.evaluationFailed": "Failed to execute the evaluation workflow. {link}.",
"testDefinition.runDetail.error.evaluationFailed.solution": "View evaluation execution",
"testDefinition.runDetail.error.triggerNoLongerExists": "Trigger in benchmark execution no longer exists in workflow.{link}.",
"testDefinition.runDetail.error.triggerNoLongerExists.solution": "View benchmark",
"testDefinition.runDetail.error.invalidMetrics": "Evaluation workflow returned invalid metrics. Only numeric values are expected. View evaluation execution. {link}.",
"testDefinition.runDetail.error.invalidMetrics.solution": "View evaluation execution",
"testDefinition.runTest": "Run Test",
"testDefinition.cancelTestRun": "Cancel Test Run",
"testDefinition.notImplemented": "This feature is not implemented yet!",
"testDefinition.viewDetails": "View Details",
"testDefinition.editTest": "Edit Test",
"testDefinition.deleteTest": "Delete Test",
"testDefinition.deleteTest.warning": "The test and all associated runs will be removed. This cannot be undone",
"testDefinition.testIsRunning": "Test is running. Please wait for it to finish.",
"testDefinition.completeConfig": "Complete the configuration below to run the test:",
"testDefinition.configError.noEvaluationTag": "No evaluation tag set",
"testDefinition.configError.noExecutionsAddedToTag": "No executions added to this tag",
"testDefinition.configError.noEvaluationWorkflow": "No evaluation workflow set",
"testDefinition.configError.noMetrics": "No metrics set",
"testDefinition.workflowInput.subworkflowName": "Evaluation workflow for {name}",
"testDefinition.workflowInput.subworkflowName.default": "My Evaluation Sub-Workflow",
"testDefinition.executions.addTo": "Add to Test",
"testDefinition.executions.addTo.new": "Add to Test",
"testDefinition.executions.addTo.existing": "Add to \"{name}\"",
"testDefinition.executions.addedTo": "Added to \"{name}\"",
"testDefinition.executions.removeFrom": "Remove from \"{name}\"",
"testDefinition.executions.removedFrom": "Execution removed from \"{name}\"",
"testDefinition.executions.toast.addedTo": "Go back to \"{name}\"",
"testDefinition.executions.tooltip.addTo": "Add to new test",
"testDefinition.executions.tooltip.noExecutions": "Evaluation executions can not be added to tests",
"testDefinition.executions.tooltip.onlySuccess": "Only successful executions can be added to tests",
"testDefinition.workflow.createNew": "Create new evaluation workflow",
"testDefinition.workflow.createNew.or": "or use existing evaluation sub-workflow",
"testDefinition.executions.toast.addedTo.title": "Execution added to test ",
"testDefinition.executions.toast.closeTab": "Close this tab",
"testDefinition.executions.toast.removedFrom.title": "Execution removed from test ",
"evaluation.listRuns.status.new": "New",
"evaluation.listRuns.status.running": "Running",
"evaluation.listRuns.status.evaluating": "Evaluating",
"evaluation.listRuns.status.completed": "Completed",
"evaluation.listRuns.status.cancelled": "Cancelled",
"evaluation.listRuns.status.error": "Error",
"evaluation.listRuns.status.success": "Success",
"evaluation.listRuns.status.warning": "Warning",
"evaluation.listRuns.metricsOverTime": "Metrics over time",
"evaluation.listRuns.status": "Status",
"evaluation.listRuns.runListHeader": "All runs",
"evaluation.listRuns.testCasesListHeader": "Run #{index}",
"evaluation.listRuns.runNumber": "Run",
"evaluation.listRuns.runDate": "Run date",
"evaluation.listRuns.runStatus": "Run status",
"evaluation.listRuns.noRuns": "No test runs",
"evaluation.listRuns.pastRuns.total": "No runs | All runs | All runs",
"evaluation.listRuns.noRuns.description": "Run a test to see the results here",
"evaluation.listRuns.deleteRuns": "No runs to delete | Delete {count} run | Delete {count} runs",
"evaluation.listRuns.noRuns.button": "Run Test",
"evaluation.listRuns.toast.error.fetchTestCases": "Failed to load run details",
"evaluation.listRuns.error.testCasesNotFound": "No matching rows in dataset{description}",
"evaluation.listRuns.error.testCasesNotFound.description": "Check any filters or limits set in the evaluation trigger",
"evaluation.listRuns.error.executionInterrupted": "Test run was interrupted",
"evaluation.listRuns.error.unknownError": "Execution error{description}",
"evaluation.listRuns.error.cantFetchTestRuns": "Couldnt fetch test runs",
"evaluation.listRuns.error.cantStartTestRun": "Couldnt start test run",
"evaluation.listRuns.error.unknownError.description": "Click for more details",
"evaluation.listRuns.error.evaluationTriggerNotFound": "Evaluation trigger missing",
"evaluation.listRuns.error.evaluationTriggerNotConfigured": "Evaluation trigger is not configured",
"evaluation.listRuns.error.evaluationTriggerDisabled": "Evaluation trigger is disabled",
"evaluation.listRuns.error.setOutputsNodeNotFound": "No 'Set outputs' node in workflow",
"evaluation.listRuns.error.setOutputsNodeNotConfigured": "'Set outputs' node is not configured",
"evaluation.listRuns.error.setMetricsNodeNotFound": "No 'Set metrics' node in workflow",
"evaluation.listRuns.error.setMetricsNodeNotConfigured": "'Set metrics' node is not configured",
"evaluation.listRuns.error.cantFetchTestCases": "Couldnt fetch test cases{description}",
"evaluation.listRuns.error.cantFetchTestCases.description": "Check the Google Sheet setup in the evaluation trigger",
"evaluation.runDetail.ranAt": "Ran at",
"evaluation.runDetail.testCase": "Test case",
"evaluation.runDetail.testCase.id": "Test case ID",
"evaluation.runDetail.testCase.status": "Test case status",
"evaluation.runDetail.totalCases": "Total cases",
"evaluation.runDetail.error.mockedNodeMissing": "Output for a mocked node does not exist in benchmark execution.{link}.",
"evaluation.runDetail.error.mockedNodeMissing.solution": "Fix test configuration",
"evaluation.runDetail.error.executionFailed": "Failed to execute workflow",
"evaluation.runDetail.error.executionFailed.solution": "View execution",
"evaluation.runDetail.error.datasetTriggerNotFound": "Dataset trigger does not exist in the workflow.{link}.",
"evaluation.runDetail.error.datasetTriggerNotFound.solution": "View workflow",
"evaluation.runDetail.error.invalidMetrics": "Evaluation metrics node returned invalid metrics. Only numeric values are expected. View workflow. {link}.",
"evaluation.runDetail.error.invalidMetrics.solution": "View workflow",
"evaluation.runDetail.error.unknownError": "An unknown error occurred",
"evaluation.runDetail.error.unknownError.solution": "View execution",
"evaluation.runDetail.error.noMetricsCollected": "No 'Set metrics' node executed",
"evaluation.runDetail.error.partialCasesFailed": "Finished with errors",
"evaluation.runTest": "Run Test",
"evaluation.cancelTestRun": "Cancel Test Run",
"evaluation.notImplemented": "This feature is not implemented yet!",
"evaluation.viewDetails": "View Details",
"evaluation.editTest": "Edit Test",
"evaluation.deleteTest": "Delete Test",
"evaluation.deleteTest.warning": "The test and all associated runs will be removed. This cannot be undone",
"evaluation.testIsRunning": "Test is running. Please wait for it to finish.",
"evaluation.completeConfig": "Complete the configuration below to run the test:",
"evaluation.configError.noEvaluationTag": "No evaluation tag set",
"evaluation.configError.noExecutionsAddedToTag": "No executions added to this tag",
"evaluation.configError.noEvaluationWorkflow": "No evaluation workflow set",
"evaluation.configError.noMetrics": "No metrics set",
"evaluation.workflowInput.subworkflowName": "Evaluation workflow for {name}",
"evaluation.workflowInput.subworkflowName.default": "My Evaluation Sub-Workflow",
"evaluation.executions.addTo": "Add to Test",
"evaluation.executions.addTo.new": "Add to Test",
"evaluation.executions.addTo.existing": "Add to \"{name}\"",
"evaluation.executions.addedTo": "Added to \"{name}\"",
"evaluation.executions.removeFrom": "Remove from \"{name}\"",
"evaluation.executions.removedFrom": "Execution removed from \"{name}\"",
"evaluation.executions.toast.addedTo": "Go back to \"{name}\"",
"evaluation.executions.tooltip.addTo": "Add to new test",
"evaluation.executions.tooltip.noExecutions": "Evaluation executions can not be added to tests",
"evaluation.executions.tooltip.onlySuccess": "Only successful executions can be added to tests",
"evaluation.workflow.createNew": "Create new evaluation workflow",
"evaluation.workflow.createNew.or": "or use existing evaluation sub-workflow",
"evaluation.executions.toast.addedTo.title": "Execution added to test ",
"evaluation.executions.toast.closeTab": "Close this tab",
"evaluation.executions.toast.removedFrom.title": "Execution removed from test ",
"evaluations.paywall.title": "Register to enable evaluation",
"evaluations.paywall.description": "Register your Community instance to unlock the evaluation feature",
"evaluations.paywall.cta": "Register instance",
"evaluations.setupWizard.title": "Test your AI workflow over multiple inputs",
"evaluations.setupWizard.description": "Evaluations measure performance against a test dataset.",
"evaluations.setupWizard.moreInfo": "More info",
"evaluations.setupWizard.stepHeader.optional": "Optional",
"evaluations.setupWizard.step1.title": "Wire up a test dataset",
"evaluations.setupWizard.step1.item1": "Set up a Google Sheet with one input per row",
"evaluations.setupWizard.step1.item2": "Add an evaluation trigger to your workflow and wire it up",
"evaluations.setupWizard.step1.button": "Add evaluation trigger",
"evaluations.setupWizard.step2.title": "Write workflow outputs back to dataset",
"evaluations.setupWizard.step2.item1": "Add a 'set outputs' operation to log each output back to Google Sheets",
"evaluations.setupWizard.step2.button": "Add 'set outputs' node",
"evaluations.setupWizard.step3.title": "Set up a quality score",
"evaluations.setupWizard.step3.item1": "Calculate a score, e.g. by comparing expected and actual outputs",
"evaluations.setupWizard.step3.item2": "Add a 'set metrics' operation to log the score",
"evaluations.setupWizard.step3.button": "Add 'Set metrics' node",
"evaluations.setupWizard.step3.skip": "Skip",
"evaluations.setupWizard.step3.notice": "Your plan supports custom metrics for one workflow only. {link}",
"evaluations.setupWizard.step3.notice.link": "See plans",
"evaluations.setupWizard.step4.title": "Run evaluation",
"evaluations.setupWizard.step4.button": "Run evaluation",
"evaluations.setupWizard.step4.altButton": "Run in editor",
"evaluations.setupWizard.limitReached": "Limit reached. Your plan includes custom metrics for one workflow only. Upgrade for unlimited use or delete the workflow with existing evaluation runs.",
"freeAi.credits.callout.claim.title": "Get {credits} free OpenAI API credits",
"freeAi.credits.callout.claim.button.label": "Claim credits",
"freeAi.credits.callout.success.title.part1": "Claimed {credits} free OpenAI API credits! Please note these free credits are only for the following models:",

View File

@@ -11,7 +11,7 @@ import { useSettingsStore } from '@/stores/settings.store';
import { useTemplatesStore } from '@/stores/templates.store';
import { useUIStore } from '@/stores/ui.store';
import { useSSOStore } from '@/stores/sso.store';
import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee';
import { useEvaluationStore } from '@/stores/evaluation.store.ee';
import { EnterpriseEditionFeature, VIEWS, EDITABLE_CANVAS_VIEWS } from '@/constants';
import { useTelemetry } from '@/composables/useTelemetry';
import { middleware } from '@/utils/rbac/middleware';
@@ -20,7 +20,7 @@ import { initializeAuthenticatedFeatures, initializeCore } from '@/init';
import { tryToParseNumber } from '@/utils/typesUtils';
import { projectsRoutes } from '@/routes/projects.routes';
import { insightsRoutes } from '@/features/insights/insights.router';
import TestDefinitionRunDetailView from './views/TestDefinition/TestDefinitionRunDetailView.vue';
import TestRunDetailView from '@/views/Evaluations.ee/TestRunDetailView.vue';
const ChangePasswordView = async () => await import('./views/ChangePasswordView.vue');
const ErrorView = async () => await import('./views/ErrorView.vue');
@@ -62,14 +62,9 @@ const SettingsExternalSecrets = async () => await import('./views/SettingsExtern
const WorkerView = async () => await import('./views/WorkerView.vue');
const WorkflowHistory = async () => await import('@/views/WorkflowHistory.vue');
const WorkflowOnboardingView = async () => await import('@/views/WorkflowOnboardingView.vue');
const TestDefinitionListView = async () =>
await import('./views/TestDefinition/TestDefinitionListView.vue');
const TestDefinitionNewView = async () =>
await import('./views/TestDefinition/TestDefinitionNewView.vue');
const TestDefinitionEditView = async () =>
await import('./views/TestDefinition/TestDefinitionEditView.vue');
const TestDefinitionRootView = async () =>
await import('./views/TestDefinition/TestDefinitionRootView.vue');
const EvaluationsView = async () => await import('@/views/Evaluations.ee/EvaluationsView.vue');
const EvaluationRootView = async () =>
await import('@/views/Evaluations.ee/EvaluationsRootView.vue');
function getTemplatesRedirect(defaultRedirect: VIEWS[keyof VIEWS]): { name: string } | false {
const settingsStore = useSettingsStore();
@@ -264,48 +259,35 @@ export const routes: RouteRecordRaw[] = [
},
{
path: '/workflow/:name/evaluation',
name: VIEWS.EVALUATION,
components: {
default: TestDefinitionRootView,
default: EvaluationRootView,
header: MainHeader,
sidebar: MainSidebar,
},
props: true,
props: {
default: true,
},
meta: {
keepWorkflowAlive: true,
middleware: ['authenticated', 'custom'],
middlewareOptions: {
custom: () => useTestDefinitionStore().isFeatureEnabled,
custom: () => useEvaluationStore().isFeatureEnabled,
},
},
children: [
{
path: '',
name: VIEWS.TEST_DEFINITION,
component: TestDefinitionListView,
name: VIEWS.EVALUATION_EDIT,
component: EvaluationsView,
props: true,
},
{
path: 'new',
name: VIEWS.NEW_TEST_DEFINITION,
component: TestDefinitionNewView,
path: 'test-runs/:runId',
name: VIEWS.EVALUATION_RUNS_DETAIL,
component: TestRunDetailView,
props: true,
},
{
path: ':testId',
name: VIEWS.TEST_DEFINITION_EDIT,
props: true,
components: {
default: TestDefinitionEditView,
},
},
{
path: ':testId/runs/:runId',
name: VIEWS.TEST_DEFINITION_RUNS_DETAIL,
props: true,
components: {
default: TestDefinitionRunDetailView,
},
},
],
},
{

View File

@@ -0,0 +1,208 @@
import { createPinia, setActivePinia } from 'pinia';
import { useEvaluationStore } from '@/stores/evaluation.store.ee';
import { useRootStore } from '@n8n/stores/useRootStore';
import { usePostHog } from '@/stores/posthog.store';
import { useAnnotationTagsStore } from '@/stores/tags.store';
import type { TestRunRecord } from '@/api/evaluation.ee';
import { mockedStore } from '@/__tests__/utils';
const { getTestRuns, getTestRun, startTestRun, deleteTestRun } = vi.hoisted(() => ({
getTestRuns: vi.fn(),
getTestRun: vi.fn(),
startTestRun: vi.fn(),
deleteTestRun: vi.fn(),
}));
vi.mock('@/api/evaluation.ee', () => ({
getTestRuns,
getTestRun,
startTestRun,
deleteTestRun,
}));
vi.mock('@n8n/stores/useRootStore', () => ({
useRootStore: vi.fn(() => ({
restApiContext: { instanceId: 'test-instance-id' },
})),
}));
const TEST_RUN: TestRunRecord = {
id: 'run1',
workflowId: '1',
status: 'completed',
metrics: { metric1: 0.75 },
createdAt: '2024-01-01',
updatedAt: '2024-01-01',
runAt: '2024-01-01',
completedAt: '2024-01-01',
};
describe('evaluation.store.ee', () => {
let store: ReturnType<typeof useEvaluationStore>;
let rootStoreMock: ReturnType<typeof useRootStore>;
let posthogStoreMock: ReturnType<typeof usePostHog>;
beforeEach(() => {
vi.restoreAllMocks();
setActivePinia(createPinia());
store = useEvaluationStore();
rootStoreMock = useRootStore();
posthogStoreMock = usePostHog();
mockedStore(useAnnotationTagsStore).fetchAll = vi.fn().mockResolvedValue([]);
getTestRuns.mockResolvedValue([TEST_RUN]);
getTestRun.mockResolvedValue(TEST_RUN);
startTestRun.mockResolvedValue({ success: true });
deleteTestRun.mockResolvedValue({ success: true });
});
test('Initialization', () => {
expect(store.testRunsById).toEqual({});
expect(store.isLoading).toBe(false);
});
describe('Computed Properties', () => {
test('isFeatureEnabled', () => {
posthogStoreMock.isFeatureEnabled = vi.fn().mockReturnValue(false);
expect(store.isFeatureEnabled).toBe(false);
posthogStoreMock.isFeatureEnabled = vi.fn().mockReturnValue(true);
expect(store.isFeatureEnabled).toBe(true);
});
});
describe('Test Runs', () => {
test('Fetching Test Runs', async () => {
const result = await store.fetchTestRuns('1');
expect(getTestRuns).toHaveBeenCalledWith(rootStoreMock.restApiContext, '1');
expect(store.testRunsById).toEqual({
run1: TEST_RUN,
});
expect(result).toEqual([TEST_RUN]);
});
test('Getting specific Test Run', async () => {
const params = { workflowId: '1', runId: 'run1' };
const result = await store.getTestRun(params);
expect(getTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, params);
expect(store.testRunsById).toEqual({
run1: TEST_RUN,
});
expect(result).toEqual(TEST_RUN);
});
test('Starting Test Run', async () => {
const result = await store.startTestRun('1');
expect(startTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, '1');
expect(result).toEqual({ success: true });
});
test('Deleting Test Run', async () => {
store.testRunsById = { run1: TEST_RUN };
const params = { workflowId: '1', runId: 'run1' };
const result = await store.deleteTestRun(params);
expect(deleteTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, params);
expect(store.testRunsById).toEqual({});
expect(result).toEqual({ success: true });
});
test('Getting Test Runs by Test ID', () => {
store.testRunsById = {
run1: TEST_RUN,
run2: { ...TEST_RUN, id: 'run2', workflowId: '2' },
};
const runs = store.testRunsByWorkflowId['1'];
expect(runs).toEqual([TEST_RUN]);
});
});
describe('Polling Mechanism', () => {
beforeEach(() => {
vi.useFakeTimers();
});
afterEach(() => {
vi.useRealTimers();
});
test('should start polling for running test runs', async () => {
const runningTestRun = {
...TEST_RUN,
status: 'running',
};
getTestRuns.mockResolvedValueOnce([runningTestRun]);
// First call returns running status
getTestRun.mockResolvedValueOnce({
...runningTestRun,
status: 'running',
});
// Second call returns completed status
getTestRun.mockResolvedValueOnce({
...runningTestRun,
status: 'completed',
});
await store.fetchTestRuns('1');
expect(store.testRunsById).toEqual({
run1: runningTestRun,
});
// Advance timer to trigger the first poll
await vi.advanceTimersByTimeAsync(1000);
// Verify first poll happened
expect(getTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, {
workflowId: '1',
runId: 'run1',
});
// Advance timer again
await vi.advanceTimersByTimeAsync(1000);
// Verify polling stopped after status changed to completed
expect(getTestRun).toHaveBeenCalledTimes(2);
});
test('should cleanup polling timeouts', async () => {
const runningTestRun = {
...TEST_RUN,
status: 'running',
};
getTestRuns.mockResolvedValueOnce([runningTestRun]);
getTestRun.mockResolvedValue({
...runningTestRun,
status: 'running',
});
await store.fetchTestRuns('1');
// Wait for the first poll to complete
await vi.runOnlyPendingTimersAsync();
// Clear mock calls from initial setup
getTestRun.mockClear();
store.cleanupPolling();
// Advance timer
await vi.advanceTimersByTimeAsync(1000);
// Verify no more polling happened after cleanup
expect(getTestRun).not.toHaveBeenCalled();
});
});
});
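The polling behaviour these tests pin down (re-fetch every second while a run is 'running' or 'new', stop on a terminal status, let cleanupPolling cancel pending timers) could be implemented roughly as below; a sketch consistent with the tests, not the store's actual code:

// Assumed shape: one pending timeout per run id, cleared on terminal status.
const pollingTimeouts: Record<string, NodeJS.Timeout> = {};

function startPolling(
  fetchRun: () => Promise<{ status: string }>,
  runId: string,
  intervalMs = 1000,
) {
  pollingTimeouts[runId] = setTimeout(async () => {
    const run = await fetchRun();
    if (run.status === 'running' || run.status === 'new') {
      startPolling(fetchRun, runId, intervalMs); // keep polling until terminal
    } else {
      delete pollingTimeouts[runId];
    }
  }, intervalMs);
}

function cleanupPolling() {
  Object.values(pollingTimeouts).forEach(clearTimeout);
}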

View File

@@ -0,0 +1,209 @@
import { defineStore } from 'pinia';
import { computed, ref } from 'vue';
import { useRootStore } from '@n8n/stores/useRootStore';
import * as evaluationsApi from '@/api/evaluation.ee';
import type { TestCaseExecutionRecord, TestRunRecord } from '@/api/evaluation.ee';
import { usePostHog } from './posthog.store';
import { WORKFLOW_EVALUATION_EXPERIMENT } from '@/constants';
import { STORES } from '@n8n/stores';
import { useWorkflowsStore } from '@/stores/workflows.store';
import { EVALUATION_NODE_TYPE, EVALUATION_TRIGGER_NODE_TYPE, NodeHelpers } from 'n8n-workflow';
import { useNodeTypesStore } from '@/stores/nodeTypes.store';
import { useSettingsStore } from '@/stores/settings.store';
export const useEvaluationStore = defineStore(
STORES.EVALUATION,
() => {
// State
const loadingTestRuns = ref(false);
const fetchedAll = ref(false);
const testRunsById = ref<Record<string, TestRunRecord>>({});
const testCaseExecutionsById = ref<Record<string, TestCaseExecutionRecord>>({});
const pollingTimeouts = ref<Record<string, NodeJS.Timeout>>({});
// Store instances
const posthogStore = usePostHog();
const rootStore = useRootStore();
const workflowsStore = useWorkflowsStore();
const nodeTypesStore = useNodeTypesStore();
const settingsStore = useSettingsStore();
// Computed
// Enable with `window.featureFlags.override('025_workflow_evaluation', true)`
const isFeatureEnabled = computed(() =>
posthogStore.isFeatureEnabled(WORKFLOW_EVALUATION_EXPERIMENT),
);
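// A quota of 0 disables evaluations; -1 (unlimited, per UNLIMITED_LICENSE_QUOTA) and any positive value enable them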
const isEvaluationEnabled = computed(
() =>
posthogStore.isFeatureEnabled(WORKFLOW_EVALUATION_EXPERIMENT) &&
settingsStore.settings.evaluation.quota !== 0,
);
const isLoading = computed(() => loadingTestRuns.value);
const testRunsByWorkflowId = computed(() => {
return Object.values(testRunsById.value).reduce(
(acc: Record<string, TestRunRecord[]>, run) => {
if (!acc[run.workflowId]) {
acc[run.workflowId] = [];
}
acc[run.workflowId].push(run);
return acc;
},
{},
);
});
const evaluationTriggerExists = computed(() => {
return workflowsStore.workflow.nodes.some(
(node) => node.type === EVALUATION_TRIGGER_NODE_TYPE,
);
});
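// True when the workflow contains an Evaluation node configured with the given operation
// ('setOutputs' or 'setMetrics'). Parameters are resolved via NodeHelpers so defaults count.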
function evaluationNodeExist(operation: string) {
return workflowsStore.workflow.nodes.some((node) => {
if (node.type !== EVALUATION_NODE_TYPE) {
return false;
}
const nodeType = nodeTypesStore.getNodeType(node.type, node.typeVersion);
if (!nodeType) return false;
const nodeParameters = NodeHelpers.getNodeParameters(
nodeType.properties,
node.parameters,
true,
false,
node,
nodeType,
);
return nodeParameters?.operation === operation;
});
}
const evaluationSetMetricsNodeExist = computed(() => {
return evaluationNodeExist('setMetrics');
});
const evaluationSetOutputsNodeExist = computed(() => {
return evaluationNodeExist('setOutputs');
});
// Methods
const fetchTestCaseExecutions = async (params: { workflowId: string; runId: string }) => {
const testCaseExecutions = await evaluationsApi.getTestCaseExecutions(
rootStore.restApiContext,
params.workflowId,
params.runId,
);
testCaseExecutions.forEach((testCaseExecution) => {
testCaseExecutionsById.value[testCaseExecution.id] = testCaseExecution;
});
return testCaseExecutions;
};
// Test Runs Methods
const fetchTestRuns = async (workflowId: string) => {
loadingTestRuns.value = true;
try {
const runs = await evaluationsApi.getTestRuns(rootStore.restApiContext, workflowId);
runs.forEach((run) => {
testRunsById.value[run.id] = run;
if (['running', 'new'].includes(run.status)) {
startPollingTestRun(workflowId, run.id);
}
});
return runs;
} finally {
loadingTestRuns.value = false;
}
};
const getTestRun = async (params: { workflowId: string; runId: string }) => {
const run = await evaluationsApi.getTestRun(rootStore.restApiContext, params);
testRunsById.value[run.id] = run;
return run;
};
const startTestRun = async (workflowId: string) => {
const result = await evaluationsApi.startTestRun(rootStore.restApiContext, workflowId);
return result;
};
const cancelTestRun = async (workflowId: string, testRunId: string) => {
const result = await evaluationsApi.cancelTestRun(
rootStore.restApiContext,
workflowId,
testRunId,
);
return result;
};
const deleteTestRun = async (params: { workflowId: string; runId: string }) => {
const result = await evaluationsApi.deleteTestRun(rootStore.restApiContext, params);
if (result.success) {
const { [params.runId]: deleted, ...rest } = testRunsById.value;
testRunsById.value = rest;
}
return result;
};
// TODO: This is a temporary solution to poll for test run status.
// We should use a more efficient polling mechanism in the future.
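// Polls once per second until the run leaves the 'new'/'running' states; a failed request is retried on the next tick.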
const startPollingTestRun = (workflowId: string, runId: string) => {
const poll = async () => {
try {
const run = await getTestRun({ workflowId, runId });
if (['running', 'new'].includes(run.status)) {
pollingTimeouts.value[runId] = setTimeout(poll, 1000);
} else {
delete pollingTimeouts.value[runId];
}
} catch (error) {
// If the API call fails, continue polling
pollingTimeouts.value[runId] = setTimeout(poll, 1000);
}
};
void poll();
};
const cleanupPolling = () => {
Object.values(pollingTimeouts.value).forEach((timeout) => {
clearTimeout(timeout);
});
pollingTimeouts.value = {};
};
return {
// State
fetchedAll,
testRunsById,
testCaseExecutionsById,
// Computed
isLoading,
isFeatureEnabled,
isEvaluationEnabled,
testRunsByWorkflowId,
evaluationTriggerExists,
evaluationSetMetricsNodeExist,
evaluationSetOutputsNodeExist,
// Methods
fetchTestCaseExecutions,
fetchTestRuns,
getTestRun,
startTestRun,
cancelTestRun,
deleteTestRun,
cleanupPolling,
};
},
{},
);

View File

@@ -220,6 +220,45 @@ export const useNodeCreatorStore = defineStore(STORES.NODE_CREATOR, () => {
});
}
function openNodeCreatorForActions(node: string, eventSource?: NodeCreatorOpenSource) {
const actionNode = allNodeCreatorNodes.value.find((i) => i.key === node);
if (!actionNode) {
return;
}
const nodeActions = actions.value[actionNode.key];
const transformedActions = nodeActions?.map((a) =>
transformNodeType(a, actionNode.properties.displayName, 'action'),
);
ndvStore.activeNodeName = null;
setSelectedView(REGULAR_NODE_CREATOR_VIEW);
setNodeCreatorState({
source: eventSource,
createNodeActive: true,
nodeCreatorView: REGULAR_NODE_CREATOR_VIEW,
});
setTimeout(() => {
useViewStacks().pushViewStack(
{
subcategory: '*',
title: actionNode.properties.displayName,
nodeIcon: {
type: 'icon',
name: 'check-double',
},
rootView: 'Regular',
mode: 'actions',
items: transformedActions,
},
{ resetStacks: true },
);
});
}
function getNodeCreatorFilter(nodeName: string, outputType?: NodeConnectionType) {
let filter;
const workflow = workflowsStore.getCurrentWorkflow();
@@ -411,6 +450,7 @@ export const useNodeCreatorStore = defineStore(STORES.NODE_CREATOR, () => {
openSelectiveNodeCreator,
openNodeCreatorForConnectingNode,
openNodeCreatorForTriggerNodes,
openNodeCreatorForActions,
onCreatorOpened,
onNodeFilterChanged,
onCategoryExpanded,

View File

@@ -1,488 +0,0 @@
import { createPinia, setActivePinia } from 'pinia';
import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee';
import { useRootStore } from '@n8n/stores/useRootStore';
import { usePostHog } from '@/stores/posthog.store';
import { useAnnotationTagsStore } from '@/stores/tags.store';
import type { TestDefinitionRecord, TestRunRecord } from '@/api/testDefinition.ee';
import { mockedStore } from '@/__tests__/utils';
const {
createTestDefinition,
deleteTestDefinition,
getTestDefinitions,
updateTestDefinition,
getTestRuns,
getTestRun,
startTestRun,
deleteTestRun,
} = vi.hoisted(() => ({
getTestDefinitions: vi.fn(),
createTestDefinition: vi.fn(),
updateTestDefinition: vi.fn(),
deleteTestDefinition: vi.fn(),
getTestRuns: vi.fn(),
getTestRun: vi.fn(),
startTestRun: vi.fn(),
deleteTestRun: vi.fn(),
}));
vi.mock('@/api/testDefinition.ee', () => ({
createTestDefinition,
deleteTestDefinition,
getTestDefinitions,
updateTestDefinition,
getTestRuns,
getTestRun,
startTestRun,
deleteTestRun,
}));
vi.mock('@n8n/stores/useRootStore', () => ({
useRootStore: vi.fn(() => ({
restApiContext: { instanceId: 'test-instance-id' },
})),
}));
const TEST_DEF_A: TestDefinitionRecord = {
id: '1',
name: 'Test Definition A',
workflowId: '123',
description: 'Description A',
createdAt: '2023-01-01T00:00:00.000Z',
};
const TEST_DEF_B: TestDefinitionRecord = {
id: '2',
name: 'Test Definition B',
workflowId: '123',
description: 'Description B',
createdAt: '2023-01-01T00:00:00.000Z',
};
const TEST_DEF_NEW: TestDefinitionRecord = {
id: '3',
name: 'New Test Definition',
workflowId: '123',
description: 'New Description',
createdAt: '2023-01-01T00:00:00.000Z',
};
const TEST_RUN: TestRunRecord = {
id: 'run1',
testDefinitionId: '1',
status: 'completed',
metrics: { metric1: 0.75 },
createdAt: '2024-01-01',
updatedAt: '2024-01-01',
runAt: '2024-01-01',
completedAt: '2024-01-01',
};
describe('testDefinition.store.ee', () => {
let store: ReturnType<typeof useTestDefinitionStore>;
let rootStoreMock: ReturnType<typeof useRootStore>;
let posthogStoreMock: ReturnType<typeof usePostHog>;
beforeEach(() => {
vi.restoreAllMocks();
setActivePinia(createPinia());
store = useTestDefinitionStore();
rootStoreMock = useRootStore();
posthogStoreMock = usePostHog();
mockedStore(useAnnotationTagsStore).fetchAll = vi.fn().mockResolvedValue([]);
getTestDefinitions.mockResolvedValue({
count: 2,
testDefinitions: [TEST_DEF_A, TEST_DEF_B],
});
createTestDefinition.mockResolvedValue(TEST_DEF_NEW);
deleteTestDefinition.mockResolvedValue({ success: true });
getTestRuns.mockResolvedValue([TEST_RUN]);
getTestRun.mockResolvedValue(TEST_RUN);
startTestRun.mockResolvedValue({ success: true });
deleteTestRun.mockResolvedValue({ success: true });
});
test('Initialization', () => {
expect(store.testDefinitionsById).toEqual({});
expect(store.isLoading).toBe(false);
expect(store.hasTestDefinitions).toBe(false);
});
describe('Test Definitions', () => {
test('Fetching Test Definitions', async () => {
expect(store.isLoading).toBe(false);
const result = await store.fetchAll({ workflowId: '123' });
expect(getTestDefinitions).toHaveBeenCalledWith(rootStoreMock.restApiContext, {
workflowId: '123',
});
expect(store.testDefinitionsById).toEqual({
'1': TEST_DEF_A,
'2': TEST_DEF_B,
});
expect(store.isLoading).toBe(false);
expect(result).toEqual([TEST_DEF_A, TEST_DEF_B]);
});
test('Fetching Test Definitions with force flag', async () => {
expect(store.isLoading).toBe(false);
const result = await store.fetchAll({ force: true, workflowId: '123' });
expect(getTestDefinitions).toHaveBeenCalledWith(rootStoreMock.restApiContext, {
workflowId: '123',
});
expect(store.testDefinitionsById).toEqual({
'1': TEST_DEF_A,
'2': TEST_DEF_B,
});
expect(store.isLoading).toBe(false);
expect(result).toEqual([TEST_DEF_A, TEST_DEF_B]);
});
test('Fetching Test Definitions when already fetched', async () => {
store.fetchedAll = true;
const result = await store.fetchAll();
expect(getTestDefinitions).not.toHaveBeenCalled();
expect(store.testDefinitionsById).toEqual({});
expect(result).toEqual({
count: 0,
testDefinitions: [],
});
});
test('Upserting Test Definitions - New Definition', () => {
const newDefinition = TEST_DEF_NEW;
store.upsertTestDefinitions([newDefinition]);
expect(store.testDefinitionsById).toEqual({
'3': TEST_DEF_NEW,
});
});
test('Upserting Test Definitions - Existing Definition', () => {
store.testDefinitionsById = {
'1': TEST_DEF_A,
};
const updatedDefinition = {
id: '1',
name: 'Updated Test Definition A',
description: 'Updated Description A',
workflowId: '123',
createdAt: '2023-01-01T00:00:00.000Z',
};
store.upsertTestDefinitions([updatedDefinition]);
expect(store.testDefinitionsById).toEqual({
1: updatedDefinition,
});
});
test('Creating a Test Definition', async () => {
const params = {
name: 'New Test Definition',
workflowId: 'test-workflow-id',
evaluationWorkflowId: 'test-evaluation-workflow-id',
description: 'New Description',
};
const result = await store.create(params);
expect(createTestDefinition).toHaveBeenCalledWith(rootStoreMock.restApiContext, params);
expect(store.testDefinitionsById).toEqual({
'3': TEST_DEF_NEW,
});
expect(result).toEqual(TEST_DEF_NEW);
});
test('Updating a Test Definition', async () => {
store.testDefinitionsById = {
'1': TEST_DEF_A,
'2': TEST_DEF_B,
};
const params = {
id: '1',
name: 'Updated Test Definition A',
description: 'Updated Description A',
workflowId: '123',
};
updateTestDefinition.mockResolvedValue(params);
const result = await store.update(params);
expect(updateTestDefinition).toHaveBeenCalledWith(rootStoreMock.restApiContext, '1', {
name: 'Updated Test Definition A',
description: 'Updated Description A',
workflowId: '123',
});
expect(store.testDefinitionsById).toEqual({
'1': { ...TEST_DEF_A, ...params },
'2': TEST_DEF_B,
});
expect(result).toEqual(params);
});
test('Deleting a Test Definition', () => {
store.testDefinitionsById = {
'1': TEST_DEF_A,
'2': TEST_DEF_B,
};
store.deleteTestDefinition('1');
expect(store.testDefinitionsById).toEqual({
'2': TEST_DEF_B,
});
});
test('Deleting a Test Definition by ID', async () => {
store.testDefinitionsById = {
'1': TEST_DEF_A,
};
const result = await store.deleteById('1');
expect(deleteTestDefinition).toHaveBeenCalledWith(rootStoreMock.restApiContext, '1');
expect(store.testDefinitionsById).toEqual({});
expect(result).toBe(true);
});
});
describe('Computed Properties', () => {
test('hasTestDefinitions', () => {
store.testDefinitionsById = {};
expect(store.hasTestDefinitions).toBe(false);
store.testDefinitionsById = {
'1': TEST_DEF_A,
};
expect(store.hasTestDefinitions).toBe(true);
});
test('isFeatureEnabled', () => {
posthogStoreMock.isFeatureEnabled = vi.fn().mockReturnValue(false);
expect(store.isFeatureEnabled).toBe(false);
posthogStoreMock.isFeatureEnabled = vi.fn().mockReturnValue(true);
expect(store.isFeatureEnabled).toBe(true);
});
test('allTestDefinitionsByWorkflowId', () => {
store.testDefinitionsById = {
'1': { ...TEST_DEF_A, workflowId: 'workflow1' },
'2': { ...TEST_DEF_B, workflowId: 'workflow1' },
'3': { ...TEST_DEF_NEW, workflowId: 'workflow2' },
};
expect(store.allTestDefinitionsByWorkflowId).toEqual({
workflow1: [
{ ...TEST_DEF_A, workflowId: 'workflow1' },
{ ...TEST_DEF_B, workflowId: 'workflow1' },
],
workflow2: [{ ...TEST_DEF_NEW, workflowId: 'workflow2' }],
});
});
test('lastRunByTestId', () => {
const olderRun = {
...TEST_RUN,
id: 'run2',
testDefinitionId: '1',
updatedAt: '2023-12-31',
};
const newerRun = {
...TEST_RUN,
id: 'run3',
testDefinitionId: '2',
updatedAt: '2024-01-02',
};
store.testRunsById = {
run1: { ...TEST_RUN, testDefinitionId: '1' },
run2: olderRun,
run3: newerRun,
};
expect(store.lastRunByTestId).toEqual({
'1': TEST_RUN,
'2': newerRun,
});
});
test('lastRunByTestId with no runs', () => {
store.testRunsById = {};
expect(store.lastRunByTestId).toEqual({});
});
});
describe('Error Handling', () => {
test('create', async () => {
createTestDefinition.mockRejectedValue(new Error('Create failed'));
await expect(
store.create({ name: 'New Test Definition', workflowId: 'test-workflow-id' }),
).rejects.toThrow('Create failed');
});
test('update', async () => {
updateTestDefinition.mockRejectedValue(new Error('Update failed'));
await expect(store.update({ id: '1', name: 'Updated Test Definition A' })).rejects.toThrow(
'Update failed',
);
});
test('deleteById', async () => {
deleteTestDefinition.mockResolvedValue({ success: false });
const result = await store.deleteById('1');
expect(result).toBe(false);
});
});
describe('Test Runs', () => {
test('Fetching Test Runs', async () => {
const result = await store.fetchTestRuns('1');
expect(getTestRuns).toHaveBeenCalledWith(rootStoreMock.restApiContext, '1');
expect(store.testRunsById).toEqual({
run1: TEST_RUN,
});
expect(result).toEqual([TEST_RUN]);
});
test('Getting specific Test Run', async () => {
const params = { testDefinitionId: '1', runId: 'run1' };
const result = await store.getTestRun(params);
expect(getTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, params);
expect(store.testRunsById).toEqual({
run1: TEST_RUN,
});
expect(result).toEqual(TEST_RUN);
});
test('Starting Test Run', async () => {
const result = await store.startTestRun('1');
expect(startTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, '1');
expect(result).toEqual({ success: true });
});
test('Deleting Test Run', async () => {
store.testRunsById = { run1: TEST_RUN };
const params = { testDefinitionId: '1', runId: 'run1' };
const result = await store.deleteTestRun(params);
expect(deleteTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, params);
expect(store.testRunsById).toEqual({});
expect(result).toEqual({ success: true });
});
test('Getting Test Runs by Test ID', () => {
store.testRunsById = {
run1: TEST_RUN,
run2: { ...TEST_RUN, id: 'run2', testDefinitionId: '2' },
};
const runs = store.testRunsByTestId['1'];
expect(runs).toEqual([TEST_RUN]);
});
});
describe('Polling Mechanism', () => {
beforeEach(() => {
vi.useFakeTimers();
});
afterEach(() => {
vi.useRealTimers();
});
test('should start polling for running test runs', async () => {
const runningTestRun = {
...TEST_RUN,
status: 'running',
};
getTestRuns.mockResolvedValueOnce([runningTestRun]);
// First call returns running status
getTestRun.mockResolvedValueOnce({
...runningTestRun,
status: 'running',
});
// Second call returns completed status
getTestRun.mockResolvedValueOnce({
...runningTestRun,
status: 'completed',
});
await store.fetchTestRuns('1');
expect(store.testRunsById).toEqual({
run1: runningTestRun,
});
// Advance timer to trigger the first poll
await vi.advanceTimersByTimeAsync(1000);
// Verify first poll happened
expect(getTestRun).toHaveBeenCalledWith(rootStoreMock.restApiContext, {
testDefinitionId: '1',
runId: 'run1',
});
// Advance timer again
await vi.advanceTimersByTimeAsync(1000);
// Verify polling stopped after status changed to completed
expect(getTestRun).toHaveBeenCalledTimes(2);
});
test('should cleanup polling timeouts', async () => {
const runningTestRun = {
...TEST_RUN,
status: 'running',
};
getTestRuns.mockResolvedValueOnce([runningTestRun]);
getTestRun.mockResolvedValue({
...runningTestRun,
status: 'running',
});
await store.fetchTestRuns('1');
// Wait for the first poll to complete
await vi.runOnlyPendingTimersAsync();
// Clear mock calls from initial setup
getTestRun.mockClear();
store.cleanupPolling();
// Advance timer
await vi.advanceTimersByTimeAsync(1000);
// Verify no more polling happened after cleanup
expect(getTestRun).not.toHaveBeenCalled();
});
});
});

View File

@@ -1,420 +0,0 @@
import { defineStore } from 'pinia';
import { computed, ref } from 'vue';
import { useRootStore } from '@n8n/stores/useRootStore';
import * as testDefinitionsApi from '@/api/testDefinition.ee';
import type {
TestCaseExecutionRecord,
TestDefinitionRecord,
TestRunRecord,
} from '@/api/testDefinition.ee';
import { usePostHog } from './posthog.store';
import { WORKFLOW_EVALUATION_EXPERIMENT } from '@/constants';
import { STORES } from '@n8n/stores';
import { useAnnotationTagsStore } from './tags.store';
import { useI18n } from '@/composables/useI18n';
type FieldIssue = { field: string; message: string };
export const useTestDefinitionStore = defineStore(
STORES.TEST_DEFINITION,
() => {
// State
const testDefinitionsById = ref<Record<string, TestDefinitionRecord>>({});
const loading = ref(false);
const fetchedAll = ref(false);
const testRunsById = ref<Record<string, TestRunRecord>>({});
const testCaseExecutionsById = ref<Record<string, TestCaseExecutionRecord>>({});
const pollingTimeouts = ref<Record<string, NodeJS.Timeout>>({});
const fieldsIssues = ref<Record<string, FieldIssue[]>>({});
// Store instances
const posthogStore = usePostHog();
const rootStore = useRootStore();
const tagsStore = useAnnotationTagsStore();
const locale = useI18n();
// Computed
const allTestDefinitions = computed(() => {
return Object.values(testDefinitionsById.value).sort((a, b) =>
(a.name ?? '').localeCompare(b.name ?? ''),
);
});
const allTestDefinitionsByWorkflowId = computed(() => {
return Object.values(testDefinitionsById.value).reduce(
(acc: Record<string, TestDefinitionRecord[]>, test) => {
if (!acc[test.workflowId]) {
acc[test.workflowId] = [];
}
acc[test.workflowId].push(test);
return acc;
},
{},
);
});
// Enable with `window.featureFlags.override('025_workflow_evaluation', true)`
const isFeatureEnabled = computed(() =>
posthogStore.isFeatureEnabled(WORKFLOW_EVALUATION_EXPERIMENT),
);
const isLoading = computed(() => loading.value);
const hasTestDefinitions = computed(() => Object.keys(testDefinitionsById.value).length > 0);
const testRunsByTestId = computed(() => {
return Object.values(testRunsById.value).reduce(
(acc: Record<string, TestRunRecord[]>, run) => {
if (!acc[run.testDefinitionId]) {
acc[run.testDefinitionId] = [];
}
acc[run.testDefinitionId].push(run);
return acc;
},
{},
);
});
const lastRunByTestId = computed(() => {
const grouped = Object.values(testRunsById.value).reduce(
(acc: Record<string, TestRunRecord[]>, run) => {
if (!acc[run.testDefinitionId]) {
acc[run.testDefinitionId] = [];
}
acc[run.testDefinitionId].push(run);
return acc;
},
{},
);
return Object.entries(grouped).reduce(
(acc: Record<string, TestRunRecord | null>, [testId, runs]) => {
acc[testId] =
runs.sort(
(a, b) => new Date(b.updatedAt).getTime() - new Date(a.updatedAt).getTime(),
)[0] || null;
return acc;
},
{},
);
});
const getFieldIssues = (testId: string) => fieldsIssues.value[testId] || [];
// Methods
const setAllTestDefinitions = (definitions: TestDefinitionRecord[]) => {
testDefinitionsById.value = definitions.reduce(
(acc: Record<string, TestDefinitionRecord>, def: TestDefinitionRecord) => {
acc[def.id] = def;
return acc;
},
{},
);
};
/**
* Upserts test definitions in the store.
* @param toUpsertDefinitions - An array of test definitions to upsert.
*/
const upsertTestDefinitions = (toUpsertDefinitions: TestDefinitionRecord[]) => {
toUpsertDefinitions.forEach((toUpsertDef) => {
const defId = toUpsertDef.id;
if (!defId) throw Error('ID is required for upserting');
const currentDef = testDefinitionsById.value[defId];
testDefinitionsById.value = {
...testDefinitionsById.value,
[defId]: {
...currentDef,
...toUpsertDef,
},
};
});
};
const deleteTestDefinition = (id: string) => {
const { [id]: deleted, ...rest } = testDefinitionsById.value;
testDefinitionsById.value = rest;
};
const fetchRunsForAllTests = async () => {
const testDefinitions = Object.values(testDefinitionsById.value);
try {
await Promise.all(testDefinitions.map(async (testDef) => await fetchTestRuns(testDef.id)));
} catch (error) {
console.error('Error fetching test runs:', error);
}
};
const fetchTestDefinition = async (id: string) => {
const testDefinition = await testDefinitionsApi.getTestDefinition(
rootStore.restApiContext,
id,
);
testDefinitionsById.value[testDefinition.id] = testDefinition;
updateRunFieldIssues(id);
return testDefinition;
};
const fetchTestDefinitionsByWorkflowId = async (workflowId: string) => {
const testDefinitions = await testDefinitionsApi.getTestDefinitions(
rootStore.restApiContext,
{ workflowId },
);
setAllTestDefinitions(testDefinitions.testDefinitions);
return testDefinitions.testDefinitions;
};
const fetchTestCaseExecutions = async (params: { testDefinitionId: string; runId: string }) => {
const testCaseExecutions = await testDefinitionsApi.getTestCaseExecutions(
rootStore.restApiContext,
params.testDefinitionId,
params.runId,
);
testCaseExecutions.forEach((testCaseExecution) => {
testCaseExecutionsById.value[testCaseExecution.id] = testCaseExecution;
});
return testCaseExecutions;
};
/**
* Fetches all test definitions from the API.
* @param {boolean} force - If true, fetches the definitions from the API even if they were already fetched before.
*/
const fetchAll = async (params?: { force?: boolean; workflowId?: string }) => {
const { force = false, workflowId } = params ?? {};
if (!force && fetchedAll.value && !workflowId) {
const testDefinitions = Object.values(testDefinitionsById.value);
return {
count: testDefinitions.length,
testDefinitions,
};
}
loading.value = true;
try {
if (!workflowId) {
return;
}
const retrievedDefinitions = await fetchTestDefinitionsByWorkflowId(workflowId);
fetchedAll.value = true;
await Promise.all([
tagsStore.fetchAll({ force: true, withUsageCount: true }),
fetchRunsForAllTests(),
]);
return retrievedDefinitions;
} finally {
loading.value = false;
}
};
const fetchExampleEvaluationInput = async (testId: string, annotationTagId: string) => {
return await testDefinitionsApi.getExampleEvaluationInput(
rootStore.restApiContext,
testId,
annotationTagId,
);
};
/**
* Creates a new test definition using the provided parameters.
*
* @param {Object} params - An object containing the necessary parameters to create a test definition.
* @param {string} params.name - The name of the new test definition.
* @param {string} params.workflowId - The ID of the workflow associated with the test definition.
* @returns {Promise<TestDefinitionRecord>} A promise that resolves to the newly created test definition.
* @throws {Error} Throws an error if there is a problem creating the test definition.
*/
const create = async (params: { name: string; workflowId: string }) => {
const createdDefinition = await testDefinitionsApi.createTestDefinition(
rootStore.restApiContext,
params,
);
upsertTestDefinitions([createdDefinition]);
updateRunFieldIssues(createdDefinition.id);
return createdDefinition;
};
const update = async (params: Partial<TestDefinitionRecord>) => {
if (!params.id) throw new Error('ID is required to update a test definition');
const { id, ...updateParams } = params;
const updatedDefinition = await testDefinitionsApi.updateTestDefinition(
rootStore.restApiContext,
id,
updateParams,
);
upsertTestDefinitions([updatedDefinition]);
updateRunFieldIssues(params.id);
return updatedDefinition;
};
/**
* Deletes a test definition by its ID.
*
* @param {string} id - The ID of the test definition to delete.
* @returns {Promise<boolean>} A promise that resolves to true if the test definition was successfully deleted, false otherwise.
*/
const deleteById = async (id: string) => {
const result = await testDefinitionsApi.deleteTestDefinition(rootStore.restApiContext, id);
if (result.success) {
deleteTestDefinition(id);
}
return result.success;
};
// Test Runs Methods
const fetchTestRuns = async (testDefinitionId: string) => {
loading.value = true;
try {
const runs = await testDefinitionsApi.getTestRuns(
rootStore.restApiContext,
testDefinitionId,
);
runs.forEach((run) => {
testRunsById.value[run.id] = run;
if (['running', 'new'].includes(run.status)) {
startPollingTestRun(testDefinitionId, run.id);
}
});
return runs;
} finally {
loading.value = false;
}
};
const getTestRun = async (params: { testDefinitionId: string; runId: string }) => {
const run = await testDefinitionsApi.getTestRun(rootStore.restApiContext, params);
testRunsById.value[run.id] = run;
updateRunFieldIssues(params.testDefinitionId);
return run;
};
const startTestRun = async (testDefinitionId: string) => {
const result = await testDefinitionsApi.startTestRun(
rootStore.restApiContext,
testDefinitionId,
);
return result;
};
const cancelTestRun = async (testDefinitionId: string, testRunId: string) => {
const result = await testDefinitionsApi.cancelTestRun(
rootStore.restApiContext,
testDefinitionId,
testRunId,
);
return result;
};
const deleteTestRun = async (params: { testDefinitionId: string; runId: string }) => {
const result = await testDefinitionsApi.deleteTestRun(rootStore.restApiContext, params);
if (result.success) {
const { [params.runId]: deleted, ...rest } = testRunsById.value;
testRunsById.value = rest;
}
return result;
};
// TODO: This is a temporary solution to poll for test run status.
// We should use a more efficient polling mechanism in the future.
const startPollingTestRun = (testDefinitionId: string, runId: string) => {
const poll = async () => {
const run = await getTestRun({ testDefinitionId, runId });
if (['running', 'new'].includes(run.status)) {
pollingTimeouts.value[runId] = setTimeout(poll, 1000);
} else {
delete pollingTimeouts.value[runId];
}
};
void poll();
};
const cleanupPolling = () => {
Object.values(pollingTimeouts.value).forEach((timeout) => {
clearTimeout(timeout);
});
pollingTimeouts.value = {};
};
const updateRunFieldIssues = (testId: string) => {
const issues: FieldIssue[] = [];
const testDefinition = testDefinitionsById.value[testId];
if (!testDefinition) {
return;
}
if (!testDefinition.annotationTagId) {
issues.push({
field: 'tags',
message: locale.baseText('testDefinition.configError.noEvaluationTag'),
});
} else {
const tagUsageCount = tagsStore.tagsById[testDefinition.annotationTagId]?.usageCount ?? 0;
if (tagUsageCount === 0) {
issues.push({
field: 'tags',
message: locale.baseText('testDefinition.configError.noExecutionsAddedToTag'),
});
}
}
if (!testDefinition.evaluationWorkflowId) {
issues.push({
field: 'evaluationWorkflow',
message: locale.baseText('testDefinition.configError.noEvaluationWorkflow'),
});
}
fieldsIssues.value = {
...fieldsIssues.value,
[testId]: issues,
};
return issues;
};
return {
// State
fetchedAll,
testDefinitionsById,
testRunsById,
testCaseExecutionsById,
// Computed
allTestDefinitions,
allTestDefinitionsByWorkflowId,
isLoading,
hasTestDefinitions,
isFeatureEnabled,
testRunsByTestId,
lastRunByTestId,
// Methods
fetchTestDefinition,
fetchTestDefinitionsByWorkflowId,
fetchTestCaseExecutions,
fetchAll,
fetchExampleEvaluationInput,
create,
update,
deleteById,
upsertTestDefinitions,
deleteTestDefinition,
fetchTestRuns,
getTestRun,
startTestRun,
cancelTestRun,
deleteTestRun,
cleanupPolling,
getFieldIssues,
updateRunFieldIssues,
};
},
{},
);

View File

@@ -23,6 +23,10 @@ const DEFAULT_STATE: UsageState = {
value: 0,
warningThreshold: 0.8,
},
workflowsHavingEvaluations: {
value: 0,
limit: 0,
},
},
license: {
planId: '',
@@ -41,6 +45,12 @@ export const useUsageStore = defineStore('usage', () => {
const planId = computed(() => state.data.license.planId);
const activeWorkflowTriggersLimit = computed(() => state.data.usage.activeWorkflowTriggers.limit);
const activeWorkflowTriggersCount = computed(() => state.data.usage.activeWorkflowTriggers.value);
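// Licensed quota for workflows with evaluations; -1 means unlimited, 0 means evaluations are not licensed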
const workflowsWithEvaluationsLimit = computed(
() => state.data.usage.workflowsHavingEvaluations.limit,
);
const workflowsWithEvaluationsCount = computed(
() => state.data.usage.workflowsHavingEvaluations.value,
);
const executionPercentage = computed(
() => (activeWorkflowTriggersCount.value / activeWorkflowTriggersLimit.value) * 100,
);
@@ -103,6 +113,8 @@ export const useUsageStore = defineStore('usage', () => {
planId,
activeWorkflowTriggersLimit,
activeWorkflowTriggersCount,
workflowsWithEvaluationsLimit,
workflowsWithEvaluationsCount,
executionPercentage,
instanceId,
managementToken,

View File

@@ -26,6 +26,10 @@ describe('Usage and plan store', () => {
value,
warningThreshold,
},
workflowsHavingEvaluations: {
value: 0,
limit: 0,
},
},
license: {
planId: '',

View File

@@ -0,0 +1,162 @@
<script setup lang="ts">
import { useWorkflowsStore } from '@/stores/workflows.store';
import { useUsageStore } from '@/stores/usage.store';
import { useAsyncState } from '@vueuse/core';
import { PLACEHOLDER_EMPTY_WORKFLOW_ID } from '@/constants';
import { useCanvasOperations } from '@/composables/useCanvasOperations';
import { useToast } from '@/composables/useToast';
import { useI18n } from '@/composables/useI18n';
import { useRouter } from 'vue-router';
import { useNodeTypesStore } from '@/stores/nodeTypes.store';
import { useEvaluationStore } from '@/stores/evaluation.store.ee';
import { computed } from 'vue';
import { N8nLink, N8nText } from '@n8n/design-system';
import EvaluationsPaywall from '@/components/Evaluations.ee/Paywall/EvaluationsPaywall.vue';
import SetupWizard from '@/components/Evaluations.ee/SetupWizard/SetupWizard.vue';
const props = defineProps<{
name: string;
}>();
const workflowsStore = useWorkflowsStore();
const usageStore = useUsageStore();
const evaluationStore = useEvaluationStore();
const router = useRouter();
const toast = useToast();
const locale = useI18n();
const nodeTypesStore = useNodeTypesStore();
const { initializeWorkspace } = useCanvasOperations({ router });
const evaluationsLicensed = computed(() => {
return usageStore.workflowsWithEvaluationsLimit !== 0;
});
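// Show the setup wizard until this workflow has at least one test run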
const showWizard = computed(() => {
const runs = Object.values(evaluationStore.testRunsById ?? {}).filter(
({ workflowId }) => workflowId === props.name,
);
return runs.length === 0;
});
// Method to run a test - will be used by the SetupWizard component
async function runTest() {
try {
await evaluationStore.startTestRun(props.name);
} catch (error) {
toast.showError(error, locale.baseText('evaluation.listRuns.error.cantStartTestRun'));
return;
}
try {
await evaluationStore.fetchTestRuns(props.name);
} catch (error) {
toast.showError(error, locale.baseText('evaluation.listRuns.error.cantFetchTestRuns'));
}
}
const { isReady } = useAsyncState(async () => {
try {
await usageStore.getLicenseInfo();
await evaluationStore.fetchTestRuns(props.name);
} catch (error) {
toast.showError(error, locale.baseText('evaluation.listRuns.error.cantFetchTestRuns'));
}
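// Note: the `name` route param carries the workflow ID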
const workflowId = props.name;
const isAlreadyInitialized = workflowsStore.workflow.id === workflowId;
if (isAlreadyInitialized) return;
if (workflowId && workflowId !== 'new') {
// Check if we are loading the Evaluation tab directly, without having loaded the workflow
if (workflowsStore.workflow.id === PLACEHOLDER_EMPTY_WORKFLOW_ID) {
try {
const data = await workflowsStore.fetchWorkflow(workflowId);
// Checking for an Evaluation node with the setMetrics operation requires resolved node properties, so initialize the nodeTypesStore first
if (nodeTypesStore.allNodeTypes.length === 0) {
await nodeTypesStore.getNodeTypes();
}
initializeWorkspace(data);
} catch (error) {
toast.showError(error, locale.baseText('nodeView.showError.openWorkflow.title'));
}
}
}
}, undefined);
</script>
<template>
<div :class="$style.evaluationsView">
<template v-if="isReady && showWizard">
<div :class="$style.setupContent">
<div>
<N8nText size="large" color="text-dark" tag="h3" bold>
{{ locale.baseText('evaluations.setupWizard.title') }}
</N8nText>
<N8nText tag="p" size="small" color="text-base" :class="$style.description">
{{ locale.baseText('evaluations.setupWizard.description') }}
<N8nLink size="small" href="https://docs.n8n.io/advanced-ai/evaluations/overview">{{
locale.baseText('evaluations.setupWizard.moreInfo')
}}</N8nLink>
</N8nText>
</div>
<div :class="$style.config">
<iframe
style="min-width: 500px"
width="500"
height="280"
src="https://www.youtube.com/embed/5LlF196PKaE"
title="n8n Evaluation quickstart"
frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
referrerpolicy="strict-origin-when-cross-origin"
allowfullscreen
></iframe>
<SetupWizard v-if="evaluationsLicensed" @run-test="runTest" />
<EvaluationsPaywall v-else />
</div>
</div>
</template>
<router-view v-else-if="isReady" />
</div>
</template>
<style module lang="scss">
.evaluationsView {
width: 100%;
height: 100%;
display: flex;
justify-content: center;
}
.setupContent {
display: flex;
flex-direction: column;
gap: var(--spacing-l);
max-width: 1024px;
margin-top: var(--spacing-2xl);
padding: 0;
}
.description {
max-width: 600px;
margin-bottom: 20px;
}
.config {
display: flex;
flex-direction: row;
gap: var(--spacing-l);
}
.setupDescription {
margin-top: var(--spacing-2xs);
ul {
li {
margin-top: var(--spacing-2xs);
}
}
}
</style>

View File

@@ -0,0 +1,123 @@
<script setup lang="ts">
import { useI18n } from '@/composables/useI18n';
import { computed, ref } from 'vue';
import RunsSection from '@/components/Evaluations.ee/ListRuns/RunsSection.vue';
import { useEvaluationStore } from '@/stores/evaluation.store.ee';
import { N8nButton, N8nTooltip } from '@n8n/design-system';
import { orderBy } from 'lodash-es';
import { useToast } from '@/composables/useToast';
const props = defineProps<{
name: string;
}>();
const locale = useI18n();
const toast = useToast();
const evaluationStore = useEvaluationStore();
const selectedMetric = ref<string>('');
async function runTest() {
try {
await evaluationStore.startTestRun(props.name);
} catch (error) {
toast.showError(error, locale.baseText('evaluation.listRuns.error.cantStartTestRun'));
}
try {
await evaluationStore.fetchTestRuns(props.name);
} catch (error) {
toast.showError(error, locale.baseText('evaluation.listRuns.error.cantFetchTestRuns'));
}
}
const runs = computed(() => {
const testRuns = Object.values(evaluationStore.testRunsById ?? {}).filter(
({ workflowId }) => workflowId === props.name,
);
return orderBy(testRuns, (record) => new Date(record.runAt), ['asc']).map((record, index) => ({
...record,
index: index + 1,
}));
});
const isRunning = computed(() => runs.value.some((run) => run.status === 'running'));
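// Only one run may be in flight at a time; the button is disabled while a run is active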
const isRunTestEnabled = computed(() => !isRunning.value);
</script>
<template>
<div :class="$style.evaluationsView">
<div :class="$style.header">
<N8nTooltip :disabled="isRunTestEnabled" :placement="'left'">
<N8nButton
:disabled="!isRunTestEnabled"
:class="$style.runTestButton"
size="small"
data-test-id="run-test-button"
:label="locale.baseText('evaluation.runTest')"
type="primary"
@click="runTest"
/>
<template #content>
<template v-if="isRunning">
{{ locale.baseText('evaluation.testIsRunning') }}
</template>
</template>
</N8nTooltip>
</div>
<div :class="$style.wrapper">
<div :class="$style.content">
<RunsSection
v-model:selectedMetric="selectedMetric"
:class="$style.runs"
:runs="runs"
:workflow-id="props.name"
/>
</div>
</div>
</div>
</template>
<style module lang="scss">
.evaluationsView {
width: 100%;
}
.content {
display: flex;
justify-content: center;
gap: var(--spacing-m);
padding-bottom: var(--spacing-m);
}
.header {
display: flex;
justify-content: end;
align-items: center;
padding: var(--spacing-m) var(--spacing-l);
padding-left: 27px;
padding-bottom: 8px;
position: sticky;
top: 0;
left: 0;
background-color: var(--color-background-light);
z-index: 2;
}
.wrapper {
padding: 0 var(--spacing-l);
padding-left: 58px;
}
.runTestButton {
white-space: nowrap;
}
.runs {
width: 100%;
max-width: 1024px;
}
</style>

View File

@@ -0,0 +1,413 @@
<script setup lang="ts">
import type { TestCaseExecutionRecord } from '@/api/evaluation.ee';
import type { TestTableColumn } from '@/components/Evaluations.ee/shared/TestTableBase.vue';
import TestTableBase from '@/components/Evaluations.ee/shared/TestTableBase.vue';
import { useI18n } from '@/composables/useI18n';
import { useToast } from '@/composables/useToast';
import { VIEWS } from '@/constants';
import type { BaseTextKey } from '@/plugins/i18n';
import { useEvaluationStore } from '@/stores/evaluation.store.ee';
import { useWorkflowsStore } from '@/stores/workflows.store';
import { convertToDisplayDate } from '@/utils/formatters/dateFormatter';
import { N8nText, N8nTooltip, N8nIcon } from '@n8n/design-system';
import { computed, onMounted, ref } from 'vue';
import { useRouter } from 'vue-router';
import { orderBy } from 'lodash-es';
import { statusDictionary } from '@/components/Evaluations.ee/shared/statusDictionary';
import { getErrorBaseKey } from '@/components/Evaluations.ee/shared/errorCodes';
const router = useRouter();
const toast = useToast();
const evaluationStore = useEvaluationStore();
const workflowsStore = useWorkflowsStore();
const locale = useI18n();
const isLoading = ref(true);
const testCases = ref<TestCaseExecutionRecord[]>([]);
const hasFailedTestCases = ref<boolean>(false);
const runId = computed(() => router.currentRoute.value.params.runId as string);
const workflowId = computed(() => router.currentRoute.value.params.name as string);
const workflowName = computed(() => workflowsStore.getWorkflowById(workflowId.value)?.name ?? '');
const run = computed(() => evaluationStore.testRunsById[runId.value]);
const runErrorDetails = computed(() => {
return run.value?.errorDetails as Record<string, string | number>;
});
const filteredTestCases = computed(() =>
orderBy(testCases.value, (record) => record.runAt, ['asc']).map((record, index) =>
Object.assign(record, { index: index + 1 }),
),
);
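// Position of the current run among this workflow's runs, ordered by runAt; drives the "Run #N" heading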
const testRunIndex = computed(() =>
Object.values(
orderBy(evaluationStore.testRunsById, (record) => new Date(record.runAt), ['asc']).filter(
({ workflowId: wId }) => wId === workflowId.value,
) ?? {},
).findIndex(({ id }) => id === runId.value),
);
const formattedTime = computed(() => convertToDisplayDate(new Date(run.value?.runAt).getTime()));
const handleRowClick = (row: TestCaseExecutionRecord) => {
const executionId = row.executionId;
if (executionId) {
const { href } = router.resolve({
name: VIEWS.EXECUTION_PREVIEW,
params: {
name: workflowId.value,
executionId,
},
});
window.open(href, '_blank');
}
};
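// Static columns plus one sortable column per metric present on the run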
const columns = computed(
(): Array<TestTableColumn<TestCaseExecutionRecord & { index: number }>> => [
{
prop: 'index',
width: 100,
label: locale.baseText('evaluation.runDetail.testCase'),
sortable: true,
formatter: (row: TestCaseExecutionRecord & { index: number }) => `#${row.index}`,
},
{
prop: 'status',
label: locale.baseText('evaluation.listRuns.status'),
},
...Object.keys(run.value?.metrics ?? {}).map((metric) => ({
prop: `metrics.${metric}`,
label: metric,
sortable: true,
filter: true,
showHeaderTooltip: true,
formatter: (row: TestCaseExecutionRecord) => row.metrics?.[metric]?.toFixed(2) ?? '-',
})),
],
);
const metrics = computed(() => run.value?.metrics ?? {});
// Temporary workaround: fetch test cases by manually retrieving the workflow executions
const fetchExecutionTestCases = async () => {
if (!runId.value || !workflowId.value) return;
isLoading.value = true;
try {
const testRun = await evaluationStore.getTestRun({
workflowId: workflowId.value,
runId: runId.value,
});
const testCaseEvaluationExecutions = await evaluationStore.fetchTestCaseExecutions({
workflowId: workflowId.value,
runId: testRun.id,
});
testCases.value = testCaseEvaluationExecutions ?? [];
hasFailedTestCases.value = testCaseEvaluationExecutions?.some(
(testCase) => testCase.status === 'error',
);
await evaluationStore.fetchTestRuns(run.value.workflowId);
} catch (error) {
toast.showError(error, locale.baseText('evaluation.listRuns.toast.error.fetchTestCases'));
} finally {
isLoading.value = false;
}
};
onMounted(async () => {
await fetchExecutionTestCases();
});
</script>
<template>
<div :class="$style.container" data-test-id="test-definition-run-detail">
<div :class="$style.header">
<button :class="$style.backButton" @click="router.back()">
<font-awesome-icon icon="arrow-left" />
<n8n-heading size="large" :bold="true">{{
locale.baseText('evaluation.listRuns.runListHeader', {
interpolate: {
name: workflowName,
},
})
}}</n8n-heading>
</button>
<span :class="$style.headerSeparator">/</span>
<n8n-heading size="large" :bold="true">
{{
locale.baseText('evaluation.listRuns.testCasesListHeader', {
interpolate: {
index: testRunIndex + 1,
},
})
}}
</n8n-heading>
</div>
<n8n-callout
v-if="run?.status === 'error'"
theme="danger"
icon="exclamation-triangle"
class="mb-s"
>
<N8nText size="small" :class="$style.capitalized">
{{
locale.baseText(
`${getErrorBaseKey(run?.errorCode)}` as BaseTextKey,
runErrorDetails ? { interpolate: runErrorDetails } : {},
) ?? locale.baseText(`${getErrorBaseKey('UNKNOWN_ERROR')}` as BaseTextKey)
}}
</N8nText>
</n8n-callout>
<el-scrollbar always :class="$style.scrollableSummary" class="mb-m">
<div style="display: flex">
<div :class="$style.summaryCard">
<N8nText size="small" :class="$style.summaryCardTitle">
{{ locale.baseText('evaluation.runDetail.totalCases') }}
</N8nText>
<N8nText size="xlarge" :class="$style.summaryCardContentLargeNumber" bold>{{
testCases.length
}}</N8nText>
</div>
<div :class="$style.summaryCard">
<N8nText size="small" :class="$style.summaryCardTitle">
{{ locale.baseText('evaluation.runDetail.ranAt') }}
</N8nText>
<div>
<N8nText size="medium"> {{ formattedTime.date }} {{ formattedTime.time }} </N8nText>
</div>
</div>
<div :class="$style.summaryCard">
<N8nText size="small" :class="$style.summaryCardTitle">
{{ locale.baseText('evaluation.listRuns.status') }}
</N8nText>
<N8nText
v-if="run?.status === 'completed' && hasFailedTestCases"
size="medium"
color="warning"
>
{{ locale.baseText(`evaluation.runDetail.error.partialCasesFailed`) }}
</N8nText>
<N8nText
v-else
:color="statusDictionary[run?.status]?.color"
size="medium"
:class="run?.status.toLowerCase()"
style="text-transform: capitalize"
>
{{ run?.status }}
</N8nText>
</div>
<div v-for="(value, key) in metrics" :key="key" :class="$style.summaryCard">
<N8nTooltip :content="key" placement="top">
<N8nText
size="small"
:class="$style.summaryCardTitle"
style="text-overflow: ellipsis; overflow: hidden"
>
{{ key }}
</N8nText>
</N8nTooltip>
<N8nText size="xlarge" :class="$style.summaryCardContentLargeNumber" bold>{{
value.toFixed(2)
}}</N8nText>
</div>
</div>
</el-scrollbar>
<div v-if="isLoading" :class="$style.loading">
<n8n-loading :loading="true" :rows="5" />
</div>
<TestTableBase
v-else
:data="filteredTestCases"
:columns="columns"
:default-sort="{ prop: 'id', order: 'descending' }"
@row-click="handleRowClick"
>
<template #id="{ row }">
<div style="display: flex; justify-content: space-between; gap: 10px">
{{ row.id }}
</div>
</template>
<template #status="{ row }">
<div style="display: inline-flex; gap: 12px; align-items: center; max-width: 100%">
<N8nIcon
:icon="statusDictionary[row.status].icon"
:color="statusDictionary[row.status].color"
/>
<template v-if="row.status === 'error'">
<N8nTooltip placement="top" :show-after="300">
<template #content>
{{
locale.baseText(`${getErrorBaseKey(row.errorCode)}` as BaseTextKey) || row.status
}}
</template>
<N8nText color="danger" :class="$style.capitalized">
{{
locale.baseText(`${getErrorBaseKey(row.errorCode)}` as BaseTextKey) || row.status
}}
</N8nText>
</N8nTooltip>
</template>
<template v-else>
<N8nText :class="$style.capitalized">
{{ row.status }}
</N8nText>
</template>
</div>
</template>
</TestTableBase>
</div>
</template>
<style module lang="scss">
.container {
height: 100%;
width: 100%;
max-width: var(--content-container-width);
margin: auto;
padding: var(--spacing-l) var(--spacing-2xl) 0;
}
.header {
display: flex;
align-items: center;
gap: var(--spacing-2xs);
margin-bottom: var(--spacing-l);
.timestamp {
color: var(--color-text-base);
font-size: var(--font-size-s);
}
}
.backButton {
display: flex;
align-items: center;
gap: var(--spacing-3xs);
padding: 0;
border: none;
background: none;
cursor: pointer;
color: var(--color-text-base);
transition: color 0.1s ease-in-out;
&:hover {
color: var(--color-primary);
}
}
.headerSeparator {
font-size: var(--font-size-xl);
color: var(--color-text-light);
}
.summary {
margin-bottom: var(--spacing-m);
.summaryStats {
display: flex;
gap: var(--spacing-l);
}
}
.stat {
display: flex;
flex-direction: column;
}
.controls {
display: flex;
gap: var(--spacing-s);
margin-bottom: var(--spacing-s);
}
.downloadButton {
margin-bottom: var(--spacing-s);
}
.loading {
display: flex;
justify-content: center;
align-items: center;
height: 200px;
}
.scrollableSummary {
border: var(--border-width-base) var(--border-style-base) var(--color-foreground-base);
border-radius: 5px;
background-color: var(--color-background-xlight);
:global(.el-scrollbar__bar) {
opacity: 1;
}
:global(.el-scrollbar__thumb) {
background-color: var(--color-foreground-base);
&:hover {
background-color: var(--color-foreground-dark);
}
}
}
.summaryCard {
height: 100px;
box-sizing: border-box;
padding: var(--spacing-s);
border-right: var(--border-width-base) var(--border-style-base) var(--color-foreground-base);
flex-basis: 169px;
flex-shrink: 0;
max-width: 170px;
display: flex;
flex-direction: column;
justify-content: space-between;
&:first-child {
border-top-left-radius: inherit;
border-bottom-left-radius: inherit;
}
}
.capitalized {
text-transform: none;
}
.capitalized::first-letter {
text-transform: uppercase;
}
.summaryCardTitle {
display: inline;
width: fit-content;
max-width: 100%;
flex-shrink: 0;
text-overflow: ellipsis;
overflow: hidden;
white-space: nowrap;
color: var(--color-text-base);
}
.summaryCardContentLargeNumber {
font-size: 32px;
line-height: 1;
}
.alertText {
display: -webkit-box;
-webkit-line-clamp: 2;
line-clamp: 2;
-webkit-box-orient: vertical;
max-width: 100%;
text-overflow: ellipsis;
overflow: hidden;
white-space: normal;
word-break: break-word;
color: var(--color-text-danger);
font-size: var(--font-size-2xs);
line-height: 1.25;
}
</style>

View File

@@ -0,0 +1,94 @@
import { describe, it, expect, beforeEach } from 'vitest';
import { mock } from 'vitest-mock-extended';
import { createTestingPinia } from '@pinia/testing';
import { createComponentRenderer } from '@/__tests__/render';
import EvaluationRootView from '../EvaluationsRootView.vue';
import { useWorkflowsStore } from '@/stores/workflows.store';
import { useEvaluationStore } from '@/stores/evaluation.store.ee';
import { mockedStore } from '@/__tests__/utils';
import type { IWorkflowDb } from '@/Interface';
import { waitFor } from '@testing-library/vue';
import type { TestRunRecord } from '@/api/evaluation.ee';
import { PLACEHOLDER_EMPTY_WORKFLOW_ID } from '@/constants';
describe('EvaluationsRootView', () => {
const renderComponent = createComponentRenderer(EvaluationRootView);
const mockWorkflow: IWorkflowDb = {
id: 'different-id',
name: 'Test Workflow',
active: false,
isArchived: false,
createdAt: Date.now(),
updatedAt: Date.now(),
nodes: [],
connections: {},
settings: {
executionOrder: 'v1',
},
tags: [],
pinData: {},
versionId: '',
usedCredentials: [],
};
const mockTestRuns: TestRunRecord[] = [mock<TestRunRecord>({ workflowId: mockWorkflow.id })];
beforeEach(() => {
createTestingPinia();
});
it('should initialize workflow on mount if not already initialized', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
const uninitializedWorkflow = { ...mockWorkflow, id: PLACEHOLDER_EMPTY_WORKFLOW_ID };
workflowsStore.workflow = uninitializedWorkflow;
const newWorkflowId = 'workflow123';
renderComponent({ props: { name: newWorkflowId } });
// Wait for async operation to complete
await waitFor(() => expect(workflowsStore.fetchWorkflow).toHaveBeenCalledWith(newWorkflowId));
});
it('should not initialize workflow if already loaded', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
workflowsStore.workflow = mockWorkflow;
renderComponent({ props: { name: mockWorkflow.id } });
expect(workflowsStore.fetchWorkflow).not.toHaveBeenCalled();
});
it('should load test data', async () => {
const evaluationStore = mockedStore(useEvaluationStore);
evaluationStore.fetchTestRuns.mockResolvedValue(mockTestRuns);
renderComponent({ props: { name: mockWorkflow.id } });
await waitFor(() =>
expect(evaluationStore.fetchTestRuns).toHaveBeenCalledWith(mockWorkflow.id),
);
});
it('should not render setup wizard when there are test runs', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
workflowsStore.fetchWorkflow.mockResolvedValue(mockWorkflow);
const evaluationStore = mockedStore(useEvaluationStore);
evaluationStore.testRunsById = { foo: mock<TestRunRecord>({ workflowId: mockWorkflow.id }) };
const { container } = renderComponent({ props: { name: mockWorkflow.id } });
// Check that setupContent is not present
await waitFor(() => expect(container.querySelector('.setupContent')).toBeFalsy());
});
it('should render the setup wizard when there are no test runs', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
workflowsStore.fetchWorkflow.mockResolvedValue(mockWorkflow);
const { container } = renderComponent({ props: { name: mockWorkflow.id } });
await waitFor(() => expect(container.querySelector('.setupContent')).toBeTruthy());
});
});

View File

@@ -0,0 +1,102 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { createTestingPinia } from '@pinia/testing';
import { createComponentRenderer } from '@/__tests__/render';
import EvaluationsView from '@/views/Evaluations.ee/EvaluationsView.vue';
import { cleanupAppModals, createAppModals, mockedStore } from '@/__tests__/utils';
import { useEvaluationStore } from '@/stores/evaluation.store.ee';
import userEvent from '@testing-library/user-event';
import type { TestRunRecord } from '@/api/evaluation.ee';
import { waitFor } from '@testing-library/vue';
vi.mock('vue-router', () => {
const push = vi.fn();
const replace = vi.fn();
const query = {};
return {
useRouter: () => ({
push,
replace,
}),
useRoute: () => ({
query,
}),
RouterLink: {
template: '<a><slot /></a>',
},
};
});
const renderComponent = createComponentRenderer(EvaluationsView, {
props: { name: 'workflow-id' },
});
describe('EvaluationsView', () => {
const mockTestRuns: TestRunRecord[] = [
{
id: 'run1',
workflowId: 'workflow-id',
status: 'completed',
runAt: '2023-01-01',
createdAt: '2023-01-01',
updatedAt: '2023-01-01',
completedAt: '2023-01-01',
metrics: {
some: 1,
},
},
];
beforeEach(() => {
createTestingPinia();
createAppModals();
});
afterEach(() => {
vi.clearAllMocks();
cleanupAppModals();
});
describe('Test Runs functionality', () => {
it('should display test runs table when runs exist', async () => {
const evaluationStore = mockedStore(useEvaluationStore);
evaluationStore.testRunsById = {
[mockTestRuns[0].id]: mockTestRuns[0],
};
evaluationStore.fetchTestRuns.mockResolvedValue(mockTestRuns);
const { getByTestId } = renderComponent();
await waitFor(() => expect(getByTestId('past-runs-table')).toBeInTheDocument());
});
it('should start a test run when run test button is clicked', async () => {
const evaluationStore = mockedStore(useEvaluationStore);
evaluationStore.testRunsById = {
run1: {
id: 'run1',
workflowId: 'workflow-id',
status: 'completed',
runAt: '2023-01-01',
createdAt: '2023-01-01',
updatedAt: '2023-01-01',
completedAt: '2023-01-01',
metrics: {
some: 1,
},
},
};
const { getByTestId } = renderComponent();
await waitFor(() => expect(getByTestId('run-test-button')).toBeInTheDocument());
await userEvent.click(getByTestId('run-test-button'));
expect(evaluationStore.startTestRun).toHaveBeenCalledWith('workflow-id');
expect(evaluationStore.fetchTestRuns).toHaveBeenCalledWith('workflow-id');
});
});
});

View File

@@ -70,7 +70,12 @@ import {
import { useSourceControlStore } from '@/stores/sourceControl.store';
import { useNodeCreatorStore } from '@/stores/nodeCreator.store';
import { useExternalHooks } from '@/composables/useExternalHooks';
import { NodeConnectionTypes, jsonParse } from 'n8n-workflow';
import {
NodeConnectionTypes,
jsonParse,
EVALUATION_TRIGGER_NODE_TYPE,
EVALUATION_NODE_TYPE,
} from 'n8n-workflow';
import type { NodeConnectionType, IDataObject, ExecutionSummary, IConnection } from 'n8n-workflow';
import { useToast } from '@/composables/useToast';
import { useSettingsStore } from '@/stores/settings.store';
@@ -327,6 +332,22 @@ async function initializeRoute(force = false) {
return;
}
// Open node panel if the route has a corresponding action
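// addEvaluationTrigger/addEvaluationNode deep-link into the node creator; executeEvaluation starts the workflow from the evaluation trigger node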
if (route.query.action === 'addEvaluationTrigger') {
nodeCreatorStore.openNodeCreatorForTriggerNodes(
NODE_CREATOR_OPEN_SOURCES.ADD_EVALUATION_TRIGGER_BUTTON,
);
} else if (route.query.action === 'addEvaluationNode') {
nodeCreatorStore.openNodeCreatorForActions(
EVALUATION_NODE_TYPE,
NODE_CREATOR_OPEN_SOURCES.ADD_EVALUATION_NODE_BUTTON,
);
} else if (route.query.action === 'executeEvaluation') {
if (evaluationTriggerNode.value) {
void runEntireWorkflow('node', evaluationTriggerNode.value.name);
}
}
const isAlreadyInitialized =
!force &&
initializedWorkflowId.value &&
@@ -1361,6 +1382,13 @@ function onOpenChat() {
startChat('main');
}
/**
* Evaluation
*/
const evaluationTriggerNode = computed(() => {
return editableWorkflow.value.nodes.find((node) => node.type === EVALUATION_TRIGGER_NODE_TYPE);
});
/**
* History events
*/

View File

@@ -1,296 +0,0 @@
<script setup lang="ts">
import { useTestDefinitionForm } from '@/components/TestDefinition/composables/useTestDefinitionForm';
import { useDebounce } from '@/composables/useDebounce';
import { useI18n } from '@/composables/useI18n';
import { useTelemetry } from '@/composables/useTelemetry';
import { useToast } from '@/composables/useToast';
import { NODE_PINNING_MODAL_KEY, VIEWS } from '@/constants';
import { useAnnotationTagsStore } from '@/stores/tags.store';
import { computed, ref, watch } from 'vue';
import { useRouter } from 'vue-router';
import InlineNameEdit from '@/components/InlineNameEdit.vue';
import ConfigSection from '@/components/TestDefinition/EditDefinition/sections/ConfigSection.vue';
import RunsSection from '@/components/TestDefinition/EditDefinition/sections/RunsSection.vue';
import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee';
import { useUIStore } from '@/stores/ui.store';
import { useWorkflowsStore } from '@/stores/workflows.store';
import { N8nButton, N8nIconButton, N8nText } from '@n8n/design-system';
import { useDocumentVisibility } from '@vueuse/core';
import { orderBy } from 'lodash-es';
import type { IDataObject, IPinData } from 'n8n-workflow';
const props = defineProps<{
testId: string;
name: string;
}>();
const router = useRouter();
const locale = useI18n();
const { debounce } = useDebounce();
const toast = useToast();
const testDefinitionStore = useTestDefinitionStore();
const tagsStore = useAnnotationTagsStore();
const uiStore = useUIStore();
const workflowStore = useWorkflowsStore();
const telemetry = useTelemetry();
const visibility = useDocumentVisibility();
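// Refresh tags, example pinned data, and field issues whenever the tab
// regains focus, so edits made in other tabs are picked up.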
watch(visibility, async () => {
if (visibility.value !== 'visible') return;
await tagsStore.fetchAll({ force: true, withUsageCount: true });
await getExamplePinnedDataForTags();
testDefinitionStore.updateRunFieldIssues(props.testId);
});
const { state, isSaving, cancelEditing, loadTestData, updateTest, startEditing, saveChanges } =
useTestDefinitionForm();
const isLoading = computed(() => tagsStore.isLoading);
const tagsById = computed(() => tagsStore.tagsById);
const currentWorkflowId = computed(() => props.name);
const workflowName = computed(() => workflowStore.workflow.name);
const hasRuns = computed(() => runs.value.length > 0);
const fieldsIssues = computed(() => testDefinitionStore.getFieldIssues(props.testId) ?? []);
const showConfig = ref(true);
const selectedMetric = ref<string>('');
const examplePinnedData = ref<IPinData>({});
void loadTestData(props.testId, props.name);
const handleUpdateTest = async () => {
try {
await updateTest(props.testId);
} catch (e: unknown) {
toast.showError(e, locale.baseText('testDefinition.edit.testSaveFailed'));
}
};
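// Debounced so rapid edits (e.g. typing in the name field) result in a single save request.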
const handleUpdateTestDebounced = debounce(handleUpdateTest, { debounceTime: 400, trailing: true });
function getFieldIssues(key: string) {
return fieldsIssues.value.filter((issue) => issue.field === key);
}
async function openPinningModal() {
uiStore.openModal(NODE_PINNING_MODAL_KEY);
}
async function runTest() {
await testDefinitionStore.startTestRun(props.testId);
await testDefinitionStore.fetchTestRuns(props.testId);
}
async function openExecutionsViewForTag() {
const executionsRoute = router.resolve({
name: VIEWS.WORKFLOW_EXECUTIONS,
params: { name: currentWorkflowId.value },
query: { tag: state.value.tags.value[0], testId: props.testId },
});
window.open(executionsRoute.href, '_blank');
}
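// Runs belonging to this test, ordered oldest-first and given a 1-based display index.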
const runs = computed(() => {
const testRuns = Object.values(testDefinitionStore.testRunsById ?? {}).filter(
({ testDefinitionId }) => testDefinitionId === props.testId,
);
return orderBy(testRuns, (record) => new Date(record.runAt), ['asc']).map((record, index) =>
Object.assign(record, { index: index + 1 }),
);
});
const isRunning = computed(() => runs.value.some((run) => run.status === 'running'));
const isRunTestEnabled = computed(() => fieldsIssues.value.length === 0 && !isRunning.value);
async function renameTag(newName: string) {
await tagsStore.rename({ id: state.value.tags.value[0], name: newName });
}
async function getExamplePinnedDataForTags() {
const exampleInput = await testDefinitionStore.fetchExampleEvaluationInput(
props.testId,
state.value.tags.value[0],
);
if (exampleInput !== null) {
examplePinnedData.value = {
'When called by a test run': [
{
json: exampleInput as IDataObject,
},
],
};
}
}
watch(() => state.value.tags, getExamplePinnedDataForTags);
const updateName = (value: string) => {
state.value.name.value = value;
void handleUpdateTestDebounced();
};
const updateDescription = (value: string) => {
state.value.description.value = value;
void handleUpdateTestDebounced();
};
function onEvaluationWorkflowCreated(workflowId: string) {
telemetry.track('User created evaluation workflow from test', {
test_id: props.testId,
subworkflow_id: workflowId,
});
}
</script>
<template>
<div v-if="!isLoading" :class="[$style.container]">
<div :class="$style.header">
<div style="display: flex; align-items: center">
<N8nIconButton
icon="arrow-left"
type="tertiary"
text
@click="router.push({ name: VIEWS.TEST_DEFINITION, params: { testId } })"
></N8nIconButton>
<InlineNameEdit
:model-value="state.name.value"
max-height="none"
type="Test name"
@update:model-value="updateName"
>
<N8nText bold size="xlarge" color="text-dark">{{ state.name.value }}</N8nText>
</InlineNameEdit>
</div>
<div style="display: flex; align-items: center; gap: 10px">
<N8nText v-if="hasRuns" color="text-light" size="small">
{{
isSaving
? locale.baseText('testDefinition.edit.saving')
: locale.baseText('testDefinition.edit.saved')
}}
</N8nText>
<N8nTooltip :disabled="isRunTestEnabled" placement="left">
<N8nButton
:disabled="!isRunTestEnabled"
:class="$style.runTestButton"
size="small"
data-test-id="run-test-button"
:label="locale.baseText('testDefinition.runTest')"
type="primary"
@click="runTest"
/>
<template #content>
<template v-if="fieldsIssues.length > 0">
<div>{{ locale.baseText('testDefinition.completeConfig') }}</div>
<div v-for="issue in fieldsIssues" :key="issue.field">- {{ issue.message }}</div>
</template>
<template v-if="isRunning">
{{ locale.baseText('testDefinition.testIsRunning') }}
</template>
</template>
</N8nTooltip>
</div>
</div>
<div :class="$style.wrapper">
<div :class="$style.description">
<InlineNameEdit
:model-value="state.description.value"
placeholder="Add a description..."
:required="false"
:autosize="{ minRows: 1, maxRows: 3 }"
input-type="textarea"
:maxlength="260"
max-height="none"
type="Test description"
@update:model-value="updateDescription"
>
<N8nText size="medium" color="text-base">{{ state.description.value }}</N8nText>
</InlineNameEdit>
</div>
<div :class="{ [$style.content]: true, [$style.contentWithRuns]: hasRuns }">
<RunsSection
v-if="hasRuns"
v-model:selectedMetric="selectedMetric"
:class="$style.runs"
:runs="runs"
:test-id="testId"
/>
<ConfigSection
v-if="showConfig"
v-model:tags="state.tags"
v-model:evaluationWorkflow="state.evaluationWorkflow"
v-model:mockedNodes="state.mockedNodes"
:class="$style.config"
:cancel-editing="cancelEditing"
:tags-by-id="tagsById"
:is-loading="isLoading"
:get-field-issues="getFieldIssues"
:start-editing="startEditing"
:save-changes="saveChanges"
:has-runs="hasRuns"
:example-pinned-data="examplePinnedData"
:sample-workflow-name="workflowName"
@rename-tag="renameTag"
@update:evaluation-workflow="handleUpdateTestDebounced"
@update:mocked-nodes="handleUpdateTestDebounced"
@open-pinning-modal="openPinningModal"
@open-executions-view-for-tag="openExecutionsViewForTag"
@evaluation-workflow-created="onEvaluationWorkflowCreated($event)"
/>
</div>
</div>
</div>
</template>
<style module lang="scss">
.content {
display: flex;
justify-content: center;
gap: var(--spacing-m);
padding-bottom: var(--spacing-m);
}
.config {
width: 480px;
.contentWithRuns & {
width: 400px;
}
}
.header {
display: flex;
justify-content: space-between;
align-items: center;
padding: var(--spacing-m) var(--spacing-l);
padding-left: 27px;
padding-bottom: 8px;
position: sticky;
top: 0;
left: 0;
background-color: var(--color-background-light);
z-index: 2;
}
.wrapper {
padding: 0 var(--spacing-l);
padding-left: 58px;
}
.description {
max-width: 600px;
margin-bottom: 20px;
}
.arrowBack {
--button-hover-background-color: transparent;
border: 0;
}
</style>

View File

@@ -1,278 +0,0 @@
<script setup lang="ts">
import EmptyState from '@/components/TestDefinition/ListDefinition/EmptyState.vue';
import TestItem from '@/components/TestDefinition/ListDefinition/TestItem.vue';
import { useI18n } from '@/composables/useI18n';
import { useMessage } from '@/composables/useMessage';
import { useToast } from '@/composables/useToast';
import { MODAL_CONFIRM, VIEWS } from '@/constants';
import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee';
import { useAsyncState } from '@vueuse/core';
import { orderBy } from 'lodash-es';
import {
N8nActionToggle,
N8nButton,
N8nHeading,
N8nIconButton,
N8nLoading,
N8nTooltip,
} from '@n8n/design-system';
import { computed, h } from 'vue';
import { RouterLink, useRouter } from 'vue-router';
const props = defineProps<{
name: string;
}>();
const router = useRouter();
const testDefinitionStore = useTestDefinitionStore();
const toast = useToast();
const locale = useI18n();
const { confirm } = useMessage();
const { isLoading } = useAsyncState(
async () => {
await testDefinitionStore.fetchAll({ workflowId: props.name });
const response = testDefinitionStore.allTestDefinitionsByWorkflowId[props.name] ?? [];
response.forEach((test) => testDefinitionStore.updateRunFieldIssues(test.id));
return [];
},
[],
{
onError: (error) => toast.showError(error, locale.baseText('testDefinition.list.loadError')),
shallow: false,
},
);
const tests = computed(() => testDefinitionStore.allTestDefinitionsByWorkflowId[props.name]);
const listItems = computed(() =>
orderBy(tests.value, [(test) => new Date(test.updatedAt ?? test.createdAt)], ['desc']).map(
(test) => ({
...test,
testCases: (testDefinitionStore.testRunsByTestId[test.id] || []).length,
lastExecution: testDefinitionStore.lastRunByTestId[test.id] ?? undefined,
isTestRunning: isTestRunning(test.id),
setupErrors: testDefinitionStore.getFieldIssues(test.id) ?? [],
}),
),
);
const commands = {
delete: onDeleteTest,
} as const;
type Action = { label: string; value: keyof typeof commands; disabled: boolean };
const actions = computed<Action[]>(() => [
{
label: 'Delete',
value: 'delete',
disabled: false,
},
]);
const handleAction = async (action: string, testId: string) =>
await commands[action as Action['value']](testId);
function isTestRunning(testId: string) {
return testDefinitionStore.lastRunByTestId[testId]?.status === 'running';
}
function onCreateTest() {
void router.push({ name: VIEWS.NEW_TEST_DEFINITION });
}
async function onRunTest(testId: string) {
try {
const result = await testDefinitionStore.startTestRun(testId);
if (result.success) {
toast.showMessage({
title: locale.baseText('testDefinition.list.testStarted'),
type: 'success',
message: h(
RouterLink,
{ to: { name: VIEWS.TEST_DEFINITION_EDIT, params: { testId } } },
() => 'Go to runs',
),
});
// Refresh the test runs so the list reflects the newly started run
await testDefinitionStore.fetchTestRuns(testId);
} else {
throw new Error('Test run failed to start');
}
} catch (error) {
toast.showError(error, locale.baseText('testDefinition.list.testStartError'));
}
}
async function onCancelTestRun(testId: string) {
try {
const testRunId = testDefinitionStore.lastRunByTestId[testId]?.id;
// FIXME: testRunId might be null for a short period of time between user clicking start and the test run being created and fetched. Just ignore it for now.
if (!testRunId) {
throw new Error('Failed to cancel test run');
}
const result = await testDefinitionStore.cancelTestRun(testId, testRunId);
if (result.success) {
toast.showMessage({
title: locale.baseText('testDefinition.list.testCancelled'),
type: 'success',
});
// Refresh the test runs so the list reflects the cancellation
await testDefinitionStore.fetchTestRuns(testId);
} else {
throw new Error('Failed to cancel test run');
}
} catch (error) {
toast.showError(error, locale.baseText('testDefinition.list.testStartError'));
}
}
function onEditTest(testId: string) {
void router.push({ name: VIEWS.TEST_DEFINITION_EDIT, params: { testId } });
}
async function onDeleteTest(testId: string) {
const deleteConfirmed = await confirm(
locale.baseText('testDefinition.deleteTest.warning'),
locale.baseText('testDefinition.deleteTest'),
{
type: 'warning',
confirmButtonText: locale.baseText('generic.delete'),
cancelButtonText: locale.baseText('generic.cancel'),
closeOnClickModal: true,
},
);
if (deleteConfirmed !== MODAL_CONFIRM) {
return;
}
await testDefinitionStore.deleteById(testId);
toast.showMessage({
title: locale.baseText('testDefinition.list.testDeleted'),
type: 'success',
});
}
</script>
<template>
<div :class="$style.container">
<N8nLoading v-if="isLoading" loading :rows="3" data-test-id="test-definition-loader" />
<EmptyState
v-else-if="!listItems.length"
data-test-id="test-definition-empty-state"
@create-test="onCreateTest"
/>
<template v-else>
<div :class="$style.header">
<N8nHeading size="xlarge" color="text-dark" bold>
{{ locale.baseText('testDefinition.list.tests') }}
</N8nHeading>
<div>
<N8nButton
:label="locale.baseText('testDefinition.list.createNew')"
class="mr-xs"
@click="onCreateTest"
/>
<N8nButton
:label="locale.baseText('testDefinition.list.runAll')"
disabled
type="secondary"
/>
</div>
</div>
<div :class="$style.testList" data-test-id="test-definition-list">
<TestItem
v-for="item in listItems"
:key="item.id"
:name="item.name"
:test-cases="item.testCases"
:execution="item.lastExecution"
:errors="item.setupErrors"
:data-test-id="`test-item-${item.id}`"
@click="onEditTest(item.id)"
>
<template #prepend>
<div @click.stop>
<N8nTooltip v-if="item.isTestRunning" content="Cancel test run" placement="top">
<N8nIconButton
icon="stop"
type="secondary"
size="mini"
@click="onCancelTestRun(item.id)"
/>
</N8nTooltip>
<N8nTooltip
v-else
:disabled="!Boolean(item.setupErrors.length)"
placement="top"
teleported
>
<template #content>
<div>{{ locale.baseText('testDefinition.completeConfig') }}</div>
<div v-for="issue in item.setupErrors" :key="issue.field">
- {{ issue.message }}
</div>
</template>
<N8nIconButton
icon="play"
type="secondary"
size="mini"
:disabled="Boolean(item.setupErrors.length)"
:data-test-id="`run-test-${item.id}`"
@click="onRunTest(item.id)"
/>
</N8nTooltip>
</div>
</template>
<template #append>
<div @click.stop>
<N8nActionToggle
:actions="actions"
:data-test-id="`test-actions-${item.id}`"
icon-orientation="horizontal"
@action="(action) => handleAction(action, item.id)"
>
</N8nActionToggle>
</div>
</template>
</TestItem>
</div>
</template>
</div>
</template>
<style module lang="scss">
.container {
height: 100%;
width: 100%;
max-width: 1184px;
margin: auto;
padding: var(--spacing-xl) var(--spacing-l);
}
.loading {
display: flex;
justify-content: center;
align-items: center;
height: 200px;
}
.testList {
display: flex;
flex-direction: column;
border: 1px solid var(--color-foreground-base);
border-radius: var(--border-radius-base);
}
.header {
display: flex;
justify-content: space-between;
margin-bottom: 20px;
}
</style>

View File

@@ -1,87 +0,0 @@
<script lang="ts" setup>
import { useTestDefinitionForm } from '@/components/TestDefinition/composables/useTestDefinitionForm';
import { useTelemetry } from '@/composables/useTelemetry';
import { useToast } from '@/composables/useToast';
import { VIEWS } from '@/constants';
import { useExecutionsStore } from '@/stores/executions.store';
import { useRootStore } from '@n8n/stores/useRootStore';
import { useAnnotationTagsStore } from '@/stores/tags.store';
import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee';
import { N8nLoading } from '@n8n/design-system';
import { useRoute, useRouter } from 'vue-router';
const props = defineProps<{
name: string;
}>();
const { state, createTest, updateTest } = useTestDefinitionForm();
const testDefinitionStore = useTestDefinitionStore();
const tagsStore = useAnnotationTagsStore();
const executionsStore = useExecutionsStore();
const toast = useToast();
const telemetry = useTelemetry();
const router = useRouter();
const route = useRoute();
function generateTagFromName(name: string): string {
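// Lowercase and underscore the name; names longer than 18 chars are shortened
// to "<first 10>..<last 8>" so the generated tag stays compact.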
let tag = name.toLowerCase().replace(/\s+/g, '_');
if (tag.length > 18) {
const start = tag.slice(0, 10);
const end = tag.slice(-8);
tag = `${start}..${end}`;
}
return tag;
}
async function createTag(tagName: string) {
try {
const newTag = await tagsStore.create(tagName, { incrementExisting: true });
return newTag;
} catch (error) {
toast.showError(error, 'Error', error.message);
throw error;
}
}
void createTest(props.name).then(async (test) => {
if (!test) {
// FIXME: handle a failed test creation more gracefully
throw new Error('Failed to create test');
}
const tag = generateTagFromName(test.name);
const testTag = await createTag(tag);
state.value.tags.value = [testTag.id];
if (typeof route.query?.executionId === 'string' && Array.isArray(route.query.annotationTags)) {
const newTags = [...(route.query.annotationTags as string[]), testTag.id];
await executionsStore.annotateExecution(route.query.executionId, { tags: newTags });
}
await updateTest(test.id);
testDefinitionStore.updateRunFieldIssues(test.id);
telemetry.track(
'User created test',
{
test_id: test.id,
workflow_id: props.name,
session_id: useRootStore().pushRef,
},
{
withPostHog: true,
},
);
await router.replace({
name: VIEWS.TEST_DEFINITION_EDIT,
params: { testId: test.id },
});
});
</script>
<template>
<N8nLoading loading :rows="3" />
</template>

View File

@@ -1,37 +0,0 @@
<script setup lang="ts">
import { useWorkflowHelpers } from '@/composables/useWorkflowHelpers';
import { useWorkflowsStore } from '@/stores/workflows.store';
import { useAsyncState } from '@vueuse/core';
import { useRouter } from 'vue-router';
const props = defineProps<{
name: string;
}>();
const router = useRouter();
const workflowHelpers = useWorkflowHelpers({ router });
const workflowStore = useWorkflowsStore();
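// Load the workflow into the store unless it is already the active one;
// the router-view below renders only once this has completed.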
const { isReady } = useAsyncState(async () => {
const workflowId = props.name;
const isAlreadyInitialized = workflowStore.workflow.id === workflowId;
if (isAlreadyInitialized) return;
const workflow = await workflowStore.fetchWorkflow(workflowId);
workflowHelpers.initState(workflow);
}, undefined);
</script>
<template>
<div :class="$style.evaluationsView">
<router-view v-if="isReady" />
</div>
</template>
<style module lang="scss">
.evaluationsView {
width: 100%;
height: 100%;
}
</style>

View File

@@ -1,409 +0,0 @@
<script setup lang="ts">
import type { TestCaseExecutionRecord } from '@/api/testDefinition.ee';
import type { TestTableColumn } from '@/components/TestDefinition/shared/TestTableBase.vue';
import TestTableBase from '@/components/TestDefinition/shared/TestTableBase.vue';
import { useI18n } from '@/composables/useI18n';
import { useToast } from '@/composables/useToast';
import { VIEWS } from '@/constants';
import type { BaseTextKey } from '@/plugins/i18n';
import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee';
import { convertToDisplayDate } from '@/utils/typesUtils';
import { N8nActionToggle, N8nButton, N8nIcon, N8nText, N8nTooltip } from '@n8n/design-system';
import { computed, onMounted, ref } from 'vue';
import { useRouter } from 'vue-router';
// TODO: replace with n8n-api type
const TEST_CASE_EXECUTION_ERROR_CODE = {
MOCKED_NODE_DOES_NOT_EXIST: 'MOCKED_NODE_DOES_NOT_EXIST',
TRIGGER_NO_LONGER_EXISTS: 'TRIGGER_NO_LONGER_EXISTS',
FAILED_TO_EXECUTE_WORKFLOW: 'FAILED_TO_EXECUTE_WORKFLOW',
EVALUATION_WORKFLOW_DOES_NOT_EXIST: 'EVALUATION_WORKFLOW_DOES_NOT_EXIST',
FAILED_TO_EXECUTE_EVALUATION_WORKFLOW: 'FAILED_TO_EXECUTE_EVALUATION_WORKFLOW',
INVALID_METRICS: 'INVALID_METRICS',
PAYLOAD_LIMIT_EXCEEDED: 'PAYLOAD_LIMIT_EXCEEDED',
UNKNOWN_ERROR: 'UNKNOWN_ERROR',
} as const;
type TestCaseExecutionErrorCodes =
(typeof TEST_CASE_EXECUTION_ERROR_CODE)[keyof typeof TEST_CASE_EXECUTION_ERROR_CODE];
const TEST_RUN_ERROR_CODES = {
PAST_EXECUTIONS_NOT_FOUND: 'PAST_EXECUTIONS_NOT_FOUND',
EVALUATION_WORKFLOW_NOT_FOUND: 'EVALUATION_WORKFLOW_NOT_FOUND',
INTERRUPTED: 'INTERRUPTED',
UNKNOWN_ERROR: 'UNKNOWN_ERROR',
} as const;
type TestRunErrorCode = (typeof TEST_RUN_ERROR_CODES)[keyof typeof TEST_RUN_ERROR_CODES];
const router = useRouter();
const toast = useToast();
const testDefinitionStore = useTestDefinitionStore();
const locale = useI18n();
const isLoading = ref(true);
const testCases = ref<TestCaseExecutionRecord[]>([]);
const runId = computed(() => router.currentRoute.value.params.runId as string);
const testId = computed(() => router.currentRoute.value.params.testId as string);
const run = computed(() => testDefinitionStore.testRunsById[runId.value]);
const test = computed(() => testDefinitionStore.testDefinitionsById[testId.value]);
const filteredTestCases = computed(() => {
return testCases.value;
});
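// convertToDisplayDate is assumed to return "<date> <time>"; splitting on spaces
// and reversing renders the time parts above the date in the summary card.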
const formattedTime = computed(() =>
convertToDisplayDate(new Date(run.value?.runAt).getTime()).split(' ').reverse(),
);
type Action = { label: string; value: string; disabled: boolean };
const rowActions = (row: TestCaseExecutionRecord): Action[] => {
return [
{
label: 'Original execution',
value: row.pastExecutionId,
disabled: !row.pastExecutionId,
},
{
label: 'New Execution',
value: row.executionId,
disabled: !row.executionId,
},
{
label: 'Evaluation Execution',
value: row.evaluationExecutionId,
disabled: !row.evaluationExecutionId,
},
];
};
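// Opens the given execution in the workflow's execution preview in a new tab.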
const goToExecution = (executionId: string) => {
const { href } = router.resolve({
name: VIEWS.EXECUTION_PREVIEW,
params: {
name: test.value?.workflowId,
executionId,
},
});
window.open(href, '_blank');
};
const testCaseErrorDictionary: Partial<Record<TestCaseExecutionErrorCodes, BaseTextKey>> = {
MOCKED_NODE_DOES_NOT_EXIST: 'testDefinition.runDetail.error.mockedNodeMissing',
FAILED_TO_EXECUTE_EVALUATION_WORKFLOW: 'testDefinition.runDetail.error.evaluationFailed',
FAILED_TO_EXECUTE_WORKFLOW: 'testDefinition.runDetail.error.executionFailed',
TRIGGER_NO_LONGER_EXISTS: 'testDefinition.runDetail.error.triggerNoLongerExists',
INVALID_METRICS: 'testDefinition.runDetail.error.invalidMetrics',
} as const;
const testRunErrorDictionary: Partial<Record<TestRunErrorCode, BaseTextKey>> = {
PAST_EXECUTIONS_NOT_FOUND: 'testDefinition.listRuns.error.noPastExecutions',
EVALUATION_WORKFLOW_NOT_FOUND: 'testDefinition.listRuns.error.evaluationWorkflowNotFound',
} as const;
const getErrorBaseKey = (errorCode?: string): string =>
testCaseErrorDictionary[errorCode as TestCaseExecutionErrorCodes] ??
testRunErrorDictionary[errorCode as TestRunErrorCode] ??
'';
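// Picks the route (execution preview or test config) most useful for debugging the given error.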
const getErrorTooltipLinkRoute = (row: TestCaseExecutionRecord) => {
if (row.errorCode === TEST_CASE_EXECUTION_ERROR_CODE.FAILED_TO_EXECUTE_EVALUATION_WORKFLOW) {
return {
name: VIEWS.EXECUTION_PREVIEW,
params: {
name: test.value?.evaluationWorkflowId,
executionId: row.evaluationExecutionId,
},
};
} else if (row.errorCode === TEST_CASE_EXECUTION_ERROR_CODE.MOCKED_NODE_DOES_NOT_EXIST) {
return {
name: VIEWS.TEST_DEFINITION_EDIT,
params: {
testId: testId.value,
},
};
} else if (row.errorCode === TEST_CASE_EXECUTION_ERROR_CODE.FAILED_TO_EXECUTE_WORKFLOW) {
return {
name: VIEWS.EXECUTION_PREVIEW,
params: {
name: test.value?.workflowId,
executionId: row.executionId,
},
};
} else if (row.errorCode === TEST_CASE_EXECUTION_ERROR_CODE.TRIGGER_NO_LONGER_EXISTS) {
return {
name: VIEWS.EXECUTION_PREVIEW,
params: {
name: test.value?.workflowId,
executionId: row.pastExecutionId,
},
};
} else if (row.errorCode === TEST_CASE_EXECUTION_ERROR_CODE.INVALID_METRICS) {
return {
name: VIEWS.EXECUTION_PREVIEW,
params: {
name: test.value?.evaluationWorkflowId,
executionId: row.evaluationExecutionId,
},
};
}
return undefined;
};
const columns = computed(
(): Array<TestTableColumn<TestCaseExecutionRecord>> => [
{
prop: 'id',
width: 250,
label: locale.baseText('testDefinition.runDetail.testCase'),
sortable: true,
formatter: (row: TestCaseExecutionRecord) => `${row.id}`,
},
{
prop: 'status',
label: locale.baseText('testDefinition.listRuns.status'),
},
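// One additional sortable, filterable column per metric present on the run;
// metric values are formatted to two decimal places, '-' when missing.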
...Object.keys(run.value?.metrics ?? {}).map((metric) => ({
prop: `metrics.${metric}`,
label: metric,
sortable: true,
filter: true,
showHeaderTooltip: true,
formatter: (row: TestCaseExecutionRecord) => row.metrics?.[metric]?.toFixed(2) ?? '-',
})),
],
);
const metrics = computed(() => run.value?.metrics ?? {});
// Temporary workaround to fetch test cases by manually getting workflow executions
const fetchExecutionTestCases = async () => {
if (!runId.value || !testId.value) return;
isLoading.value = true;
try {
await testDefinitionStore.fetchTestDefinition(testId.value);
const testRun = await testDefinitionStore.getTestRun({
testDefinitionId: testId.value,
runId: runId.value,
});
const testCaseEvaluationExecutions = await testDefinitionStore.fetchTestCaseExecutions({
testDefinitionId: testId.value,
runId: testRun.id,
});
testCases.value = testCaseEvaluationExecutions ?? [];
} catch (error) {
toast.showError(error, 'Failed to load run details');
} finally {
isLoading.value = false;
}
};
onMounted(async () => {
await fetchExecutionTestCases();
});
</script>
<template>
<div :class="$style.container" data-test-id="test-definition-run-detail">
<div :class="$style.header">
<button :class="$style.backButton" @click="router.back()">
<i class="mr-xs"><font-awesome-icon icon="arrow-left" /></i>
<n8n-heading size="large" :bold="true">{{ test?.name }}</n8n-heading>
<i class="ml-xs mr-xs"><font-awesome-icon icon="chevron-right" /></i>
<n8n-heading size="large" :bold="true">
{{ locale.baseText('testDefinition.listRuns.runNumber') }}{{ run?.id }}
</n8n-heading>
</button>
</div>
<el-scrollbar always :class="$style.scrollableSummary" class="mb-m">
<div style="display: flex">
<div :class="$style.summaryCard">
<N8nText size="small">
{{ locale.baseText('testDefinition.runDetail.totalCases') }}
</N8nText>
<N8nText size="xlarge" style="font-size: 32px" bold>{{ testCases.length }}</N8nText>
</div>
<div :class="$style.summaryCard">
<N8nText size="small">
{{ locale.baseText('testDefinition.runDetail.ranAt') }}
</N8nText>
<div>
<N8nText v-for="item in formattedTime" :key="item" size="medium" tag="div">
{{ item }}
</N8nText>
</div>
</div>
<div :class="$style.summaryCard">
<N8nText size="small">
{{ locale.baseText('testDefinition.listRuns.status') }}
</N8nText>
<N8nText size="large" :class="run?.status.toLowerCase()">
{{ run?.status }}
</N8nText>
</div>
<div v-for="(value, key) in metrics" :key="key" :class="$style.summaryCard">
<N8nTooltip :content="key" placement="top">
<N8nText size="small" style="text-overflow: ellipsis; overflow: hidden">
{{ key }}
</N8nText>
</N8nTooltip>
<N8nText size="xlarge" style="font-size: 32px" bold>{{ value.toFixed(2) }}</N8nText>
</div>
</div>
</el-scrollbar>
<div v-if="isLoading" :class="$style.loading">
<n8n-loading :loading="true" :rows="5" />
</div>
<TestTableBase
v-else
:data="filteredTestCases"
:columns="columns"
:default-sort="{ prop: 'id', order: 'descending' }"
>
<template #id="{ row }">
<div style="display: flex; justify-content: space-between; gap: 10px">
{{ row.id }}
<N8nActionToggle
:actions="rowActions(row)"
icon-orientation="horizontal"
@action="gotToExecution"
>
<N8nButton type="secondary">View</N8nButton>
</N8nActionToggle>
</div>
</template>
<template #status="{ row }">
<template v-if="row.status === 'error'">
<N8nTooltip placement="right" :show-after="300">
<template #content>
<template v-if="getErrorBaseKey(row.errorCode)">
<i18n-t :keypath="getErrorBaseKey(row.errorCode)">
<template #link>
<RouterLink :to="getErrorTooltipLinkRoute(row) ?? ''" target="_blank">
{{
locale.baseText(`${getErrorBaseKey(row.errorCode)}.solution` as BaseTextKey)
}}
</RouterLink>
</template>
</i18n-t>
</template>
<template v-else> UNKNOWN_ERROR </template>
</template>
<div style="display: inline-flex; gap: 8px; text-transform: capitalize">
<N8nIcon icon="exclamation-triangle" color="danger"></N8nIcon>
<N8nText size="small" color="danger">
{{ row.status }}
</N8nText>
</div>
</N8nTooltip>
</template>
</template>
</TestTableBase>
</div>
</template>
<style module lang="scss">
.container {
height: 100%;
width: 100%;
max-width: var(--content-container-width);
margin: auto;
padding: var(--spacing-l) var(--spacing-2xl) 0;
}
.backButton {
display: flex;
align-items: center;
gap: var(--spacing-s);
border: none;
background: none;
cursor: pointer;
color: var(--color-text-base);
}
.header {
display: flex;
align-items: center;
gap: var(--spacing-s);
margin-bottom: var(--spacing-l);
.timestamp {
color: var(--color-text-base);
font-size: var(--font-size-s);
}
}
.summary {
margin-bottom: var(--spacing-m);
.summaryStats {
display: flex;
gap: var(--spacing-l);
}
}
.stat {
display: flex;
flex-direction: column;
}
.controls {
display: flex;
gap: var(--spacing-s);
margin-bottom: var(--spacing-s);
}
.downloadButton {
margin-bottom: var(--spacing-s);
}
.loading {
display: flex;
justify-content: center;
align-items: center;
height: 200px;
}
.scrollableSummary {
border: var(--border-width-base) var(--border-style-base) var(--color-foreground-base);
border-radius: 5px;
background-color: var(--color-background-xlight);
:global(.el-scrollbar__bar) {
opacity: 1;
}
:global(.el-scrollbar__thumb) {
background-color: var(--color-foreground-base);
&:hover {
background-color: var(--color-foreground-dark);
}
}
}
.summaryCard {
padding: var(--spacing-s);
border-right: var(--border-width-base) var(--border-style-base) var(--color-foreground-base);
flex-basis: 169px;
flex-shrink: 0;
max-width: 170px;
display: flex;
flex-direction: column;
justify-content: space-between;
&:first-child {
border-top-left-radius: inherit;
border-bottom-left-radius: inherit;
}
}
</style>

View File

@@ -1,110 +0,0 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { createTestingPinia } from '@pinia/testing';
import { createComponentRenderer } from '@/__tests__/render';
import TestDefinitionEditView from '@/views/TestDefinition/TestDefinitionEditView.vue';
import type { useTestDefinitionForm } from '@/components/TestDefinition/composables/useTestDefinitionForm';
import { ref } from 'vue';
import { cleanupAppModals, createAppModals, mockedStore } from '@/__tests__/utils';
import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee';
import userEvent from '@testing-library/user-event';
const form: Partial<ReturnType<typeof useTestDefinitionForm>> = {
state: ref({
name: { value: '', isEditing: false, tempValue: '' },
description: { value: '', isEditing: false, tempValue: '' },
tags: { value: [], tempValue: [], isEditing: false },
evaluationWorkflow: { mode: 'list', value: '', __rl: true },
mockedNodes: [],
}),
loadTestData: vi.fn(),
cancelEditing: vi.fn(),
updateTest: vi.fn(),
startEditing: vi.fn(),
saveChanges: vi.fn(),
createTest: vi.fn(),
};
vi.mock('@/components/TestDefinition/composables/useTestDefinitionForm', () => ({
useTestDefinitionForm: () => form,
}));
const renderComponent = createComponentRenderer(TestDefinitionEditView, {
props: { testId: '1', name: 'workflow-name' },
});
describe('TestDefinitionEditView', () => {
beforeEach(() => {
createTestingPinia();
createAppModals();
});
afterEach(() => {
vi.clearAllMocks();
cleanupAppModals();
});
it('should load test data', async () => {
renderComponent();
expect(form.loadTestData).toHaveBeenCalledWith('1', 'workflow-name');
});
it('should display disabled "run test" button when editing test without tags', async () => {
const testDefinitionStore = mockedStore(useTestDefinitionStore);
testDefinitionStore.getFieldIssues.mockReturnValueOnce([
{ field: 'tags', message: 'Tag is required' },
]);
const { getByTestId } = renderComponent();
const updateButton = getByTestId('run-test-button');
expect(updateButton.textContent?.toLowerCase()).toContain('run test');
expect(updateButton).toHaveClass('disabled');
});
it('should apply "has-issues" class to inputs with issues', async () => {
const testDefinitionStore = mockedStore(useTestDefinitionStore);
testDefinitionStore.getFieldIssues.mockReturnValueOnce([
{ field: 'evaluationWorkflow', message: 'No evaluation workflow set' },
]);
const { container } = renderComponent();
const issueElements = container.querySelectorAll('.has-issues');
expect(issueElements.length).toBeGreaterThan(0);
});
describe('Test Runs functionality', () => {
it('should display test runs table when runs exist', async () => {
const testDefinitionStore = mockedStore(useTestDefinitionStore);
testDefinitionStore.testRunsById = {
run1: {
id: 'run1',
testDefinitionId: '1',
status: 'completed',
runAt: '2023-01-01',
createdAt: '2023-01-01',
updatedAt: '2023-01-01',
completedAt: '2023-01-01',
},
};
const { getByTestId } = renderComponent();
expect(getByTestId('past-runs-table')).toBeInTheDocument();
});
it('should not display test runs table when no runs exist', async () => {
const { queryByTestId } = renderComponent();
expect(queryByTestId('past-runs-table')).not.toBeInTheDocument();
});
it('should start a test run when run test button is clicked', async () => {
const testDefinitionStore = mockedStore(useTestDefinitionStore);
const { getByTestId } = renderComponent();
await userEvent.click(getByTestId('run-test-button'));
expect(testDefinitionStore.startTestRun).toHaveBeenCalledWith('1');
expect(testDefinitionStore.fetchTestRuns).toHaveBeenCalledWith('1');
});
});
});

View File

@@ -1,184 +0,0 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { createTestingPinia } from '@pinia/testing';
import { createComponentRenderer } from '@/__tests__/render';
import TestDefinitionListView from '@/views/TestDefinition/TestDefinitionListView.vue';
import type { useToast } from '@/composables/useToast';
import type { useMessage } from '@/composables/useMessage';
import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee';
import { mockedStore } from '@/__tests__/utils';
import { MODAL_CONFIRM } from '@/constants';
import type { TestDefinitionRecord } from '@/api/testDefinition.ee';
import userEvent from '@testing-library/user-event';
import { within, waitFor } from '@testing-library/dom';
const renderComponent = createComponentRenderer(TestDefinitionListView);
const workflowId = 'workflow1';
const mockTestDefinitions: TestDefinitionRecord[] = [
{
id: '1',
name: 'Test 1',
workflowId,
updatedAt: '2023-01-01T00:00:00.000Z',
createdAt: '2023-01-01T00:00:00.000Z',
annotationTagId: 'tag1',
},
{
id: '2',
name: 'Test 2',
workflowId,
updatedAt: '2023-01-02T00:00:00.000Z',
createdAt: '2023-01-01T00:00:00.000Z',
},
{
id: '3',
name: 'Test 3',
workflowId,
updatedAt: '2023-01-03T00:00:00.000Z',
createdAt: '2023-01-01T00:00:00.000Z',
},
];
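// vi.hoisted creates these stubs before the vi.mock factories below are
// hoisted and executed, so the factories can safely close over them.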
const toast = vi.hoisted(
() =>
({
showMessage: vi.fn(),
showError: vi.fn(),
}) satisfies Partial<ReturnType<typeof useToast>>,
);
vi.mock('@/composables/useToast', () => ({
useToast: () => toast,
}));
const message = vi.hoisted(
() =>
({
confirm: vi.fn(),
}) satisfies Partial<ReturnType<typeof useMessage>>,
);
vi.mock('@/composables/useMessage', () => ({
useMessage: () => message,
}));
describe('TestDefinitionListView', () => {
beforeEach(() => {
createTestingPinia();
});
afterEach(() => {
vi.clearAllMocks();
});
it('should render loader', async () => {
const { getByTestId } = renderComponent({ props: { name: 'any' } });
expect(getByTestId('test-definition-loader')).toBeTruthy();
});
it('should render empty state when no tests exist', async () => {
const testDefinitionStore = mockedStore(useTestDefinitionStore);
testDefinitionStore.allTestDefinitionsByWorkflowId = {};
const { getByTestId } = renderComponent({ props: { name: 'any' } });
await waitFor(() => expect(getByTestId('test-definition-empty-state')).toBeTruthy());
});
it('should render tests list when tests exist', async () => {
const testDefinitionStore = mockedStore(useTestDefinitionStore);
testDefinitionStore.allTestDefinitionsByWorkflowId[workflowId] = mockTestDefinitions;
const { getByTestId } = renderComponent({ props: { name: workflowId } });
await waitFor(() => expect(getByTestId('test-definition-list')).toBeTruthy());
});
it('should load initial data based on the route param', async () => {
const testDefinitionStore = mockedStore(useTestDefinitionStore);
renderComponent({ props: { name: workflowId } });
expect(testDefinitionStore.fetchAll).toHaveBeenCalledWith({ workflowId });
});
it('should start test run and show success message', async () => {
const testDefinitionStore = mockedStore(useTestDefinitionStore);
testDefinitionStore.allTestDefinitionsByWorkflowId[workflowId] = mockTestDefinitions;
testDefinitionStore.startTestRun.mockResolvedValueOnce({ success: true });
const { getByTestId } = renderComponent({ props: { name: workflowId } });
await waitFor(() => expect(getByTestId('test-definition-list')).toBeTruthy());
const testToRun = mockTestDefinitions[0].id;
await userEvent.click(getByTestId(`run-test-${testToRun}`));
expect(testDefinitionStore.startTestRun).toHaveBeenCalledWith(testToRun);
expect(toast.showMessage).toHaveBeenCalledWith(expect.objectContaining({ type: 'success' }));
expect(testDefinitionStore.fetchTestRuns).toHaveBeenCalledWith(testToRun);
});
it('should show error message on failed test run', async () => {
const testDefinitionStore = mockedStore(useTestDefinitionStore);
testDefinitionStore.allTestDefinitionsByWorkflowId[workflowId] = mockTestDefinitions;
testDefinitionStore.startTestRun.mockRejectedValueOnce(new Error('Run failed'));
const { getByTestId } = renderComponent({ props: { name: workflowId } });
await waitFor(() => expect(getByTestId('test-definition-list')).toBeTruthy());
const testToRun = mockTestDefinitions[0].id;
await userEvent.click(getByTestId(`run-test-${testToRun}`));
expect(toast.showError).toHaveBeenCalledWith(expect.any(Error), expect.any(String));
});
it('should delete test and show success message', async () => {
const testDefinitionStore = mockedStore(useTestDefinitionStore);
testDefinitionStore.allTestDefinitionsByWorkflowId[workflowId] = mockTestDefinitions;
message.confirm.mockResolvedValueOnce(MODAL_CONFIRM);
const { getByTestId, queryByTestId } = renderComponent({
props: { name: workflowId },
});
await waitFor(() => expect(getByTestId('test-definition-list')).toBeTruthy());
const testToDelete = mockTestDefinitions[0].id;
const trigger = getByTestId(`test-actions-${testToDelete}`);
await userEvent.click(trigger);
const dropdownId = within(trigger).getByRole('button').getAttribute('aria-controls');
const dropdown = document.querySelector(`#${dropdownId}`);
expect(dropdown).toBeInTheDocument();
await userEvent.click(await within(dropdown as HTMLElement).findByText('Delete'));
expect(testDefinitionStore.deleteById).toHaveBeenCalledWith(testToDelete);
expect(toast.showMessage).toHaveBeenCalledWith(expect.objectContaining({ type: 'success' }));
/**
* since the actions are mocked by default,
* double check the UI updates correctly
*/
testDefinitionStore.allTestDefinitionsByWorkflowId = {
[workflowId]: [mockTestDefinitions[1], mockTestDefinitions[2]],
};
await waitFor(() =>
expect(queryByTestId(`test-actions-${testToDelete}`)).not.toBeInTheDocument(),
);
});
it('should sort tests by updated date in descending order', async () => {
const testDefinitionStore = mockedStore(useTestDefinitionStore);
testDefinitionStore.allTestDefinitionsByWorkflowId[workflowId] = mockTestDefinitions;
const { container, getByTestId } = renderComponent({ props: { name: workflowId } });
await waitFor(() => expect(getByTestId('test-definition-list')).toBeTruthy());
const testItems = container.querySelectorAll('[data-test-id^="test-item-"]');
expect(testItems[0].getAttribute('data-test-id')).toBe('test-item-3');
expect(testItems[1].getAttribute('data-test-id')).toBe('test-item-2');
expect(testItems[2].getAttribute('data-test-id')).toBe('test-item-1');
});
});

View File

@@ -1,88 +0,0 @@
import { describe, it, expect, vi, beforeEach, afterEach, type Mock } from 'vitest';
import { createTestingPinia } from '@pinia/testing';
import { createComponentRenderer } from '@/__tests__/render';
import TestDefinitionNewView from '@/views/TestDefinition/TestDefinitionNewView.vue';
import { ref } from 'vue';
import { mockedStore } from '@/__tests__/utils';
import { useTestDefinitionStore } from '@/stores/testDefinition.store.ee';
import { useAnnotationTagsStore } from '@/stores/tags.store';
import { useRoute } from 'vue-router';
import { useExecutionsStore } from '@/stores/executions.store';
import { waitFor } from '@testing-library/vue';
const workflowId = 'workflow_id';
const testId = 'test_id';
const mockedForm = {
state: ref({ tags: { value: [] }, name: { value: '', isEditing: false, tempValue: '' } }),
createTest: vi.fn().mockResolvedValue({
id: testId,
name: 'test_name',
workflowId,
createdAt: '',
}),
updateTest: vi.fn().mockResolvedValue({}),
};
vi.mock('@/components/TestDefinition/composables/useTestDefinitionForm', () => ({
useTestDefinitionForm: vi.fn().mockImplementation(() => mockedForm),
}));
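// Partially mock vue-router: keep the real exports but stub useRoute/useRouter
// so redirects performed by the view can be asserted via mockReplace.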
const mockReplace = vi.fn();
vi.mock('vue-router', async (importOriginal) => ({
// eslint-disable-next-line @typescript-eslint/consistent-type-imports
...(await importOriginal<typeof import('vue-router')>()),
useRoute: vi.fn().mockReturnValue({}),
useRouter: vi.fn(() => ({
replace: mockReplace,
})),
}));
describe('TestDefinitionNewView', () => {
const renderComponent = createComponentRenderer(TestDefinitionNewView);
beforeEach(() => {
createTestingPinia();
});
afterEach(() => {
vi.clearAllMocks();
});
it('should create a test and redirect', async () => {
const testDefinitionStore = mockedStore(useTestDefinitionStore);
const annotationTagsStore = mockedStore(useAnnotationTagsStore);
annotationTagsStore.create.mockResolvedValueOnce({ id: 'tag_id', name: 'tag_name' });
renderComponent({ props: { name: workflowId } });
expect(mockedForm.createTest).toHaveBeenCalledWith(workflowId);
await waitFor(() =>
expect(testDefinitionStore.updateRunFieldIssues).toHaveBeenCalledWith(testId),
);
expect(mockReplace).toHaveBeenCalledWith(
expect.objectContaining({
params: {
testId,
},
}),
);
});
it('should assign an execution to a test', async () => {
(useRoute as Mock).mockReturnValue({
query: { executionId: 'execution_id', annotationTags: ['2', '3'] },
});
const annotationTagsStore = mockedStore(useAnnotationTagsStore);
const executionsStore = mockedStore(useExecutionsStore);
annotationTagsStore.create.mockResolvedValueOnce({ id: 'tag_id', name: 'tag_name' });
renderComponent({ props: { name: workflowId } });
await waitFor(() =>
expect(executionsStore.annotateExecution).toHaveBeenCalledWith('execution_id', {
tags: ['2', '3', 'tag_id'],
}),
);
});
});

View File

@@ -1,63 +0,0 @@
import { describe, it, expect, beforeEach } from 'vitest';
import { createTestingPinia } from '@pinia/testing';
import { createComponentRenderer } from '@/__tests__/render';
import TestDefinitionRootView from '../TestDefinitionRootView.vue';
import { useWorkflowsStore } from '@/stores/workflows.store';
import { mockedStore } from '@/__tests__/utils';
import type { IWorkflowDb } from '@/Interface';
import { waitFor } from '@testing-library/vue';
describe('TestDefinitionRootView', () => {
const renderComponent = createComponentRenderer(TestDefinitionRootView);
const mockWorkflow: IWorkflowDb = {
id: 'different-id',
name: 'Test Workflow',
active: false,
isArchived: false,
createdAt: Date.now(),
updatedAt: Date.now(),
nodes: [],
connections: {},
settings: {
executionOrder: 'v1',
},
tags: [],
pinData: {},
versionId: '',
usedCredentials: [],
};
beforeEach(() => {
createTestingPinia();
});
it('should initialize workflow on mount if not already initialized', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
workflowsStore.workflow = mockWorkflow;
const newWorkflowId = 'workflow123';
renderComponent({ props: { name: newWorkflowId } });
expect(workflowsStore.fetchWorkflow).toHaveBeenCalledWith(newWorkflowId);
});
it('should not initialize workflow if already loaded', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
workflowsStore.workflow = mockWorkflow;
renderComponent({ props: { name: mockWorkflow.id } });
expect(workflowsStore.fetchWorkflow).not.toHaveBeenCalled();
});
it('should render router view', async () => {
const workflowsStore = mockedStore(useWorkflowsStore);
workflowsStore.fetchWorkflow.mockResolvedValue(mockWorkflow);
const { container } = renderComponent({ props: { name: mockWorkflow.id } });
await waitFor(() => expect(container.querySelector('router-view')).toBeTruthy());
});
});