diff --git a/packages/cli/src/__tests__/manual-execution.service.test.ts b/packages/cli/src/__tests__/manual-execution.service.test.ts
index c7bcd76acf..43d14f0286 100644
--- a/packages/cli/src/__tests__/manual-execution.service.test.ts
+++ b/packages/cli/src/__tests__/manual-execution.service.test.ts
@@ -597,4 +597,36 @@ describe('ManualExecutionService', () => {
 			);
 		});
 	});
+
+	it('should call workflowExecute.run for full execution when execution mode is evaluation', async () => {
+		const data = mock<IWorkflowExecutionDataProcess>({
+			executionMode: 'evaluation',
+			destinationNode: undefined,
+			pinData: {},
+			runData: {},
+			triggerToStartFrom: undefined,
+		});
+
+		const workflow = mock<Workflow>({
+			getNode: jest.fn().mockReturnValue(null),
+			getTriggerNodes: jest.fn().mockReturnValue([]),
+		});
+
+		const additionalData = mock<IWorkflowExecuteAdditionalData>();
+		const executionId = 'test-execution-id-evaluation';
+
+		const mockRun = jest.fn().mockReturnValue('mockRunReturnEvaluation');
+		require('n8n-core').WorkflowExecute.mockImplementationOnce(() => ({
+			run: mockRun,
+			processRunExecutionData: jest.fn(),
+		}));
+
+		await manualExecutionService.runManually(data, workflow, additionalData, executionId);
+
+		expect(mockRun.mock.calls[0][0]).toBe(workflow);
+		expect(mockRun.mock.calls[0][1]).toBeUndefined(); // startNode
+		expect(mockRun.mock.calls[0][2]).toBeUndefined(); // destinationNode
+		expect(mockRun.mock.calls[0][3]).toBe(data.pinData); // pinData
+		expect(mockRun.mock.calls[0][4]).toBeUndefined(); // triggerToStartFrom
+	});
 });
diff --git a/packages/cli/src/evaluation.ee/test-runner/__tests__/test-runner.service.ee.test.ts b/packages/cli/src/evaluation.ee/test-runner/__tests__/test-runner.service.ee.test.ts
index e8aeddcfbc..5c5056e903 100644
--- a/packages/cli/src/evaluation.ee/test-runner/__tests__/test-runner.service.ee.test.ts
+++ b/packages/cli/src/evaluation.ee/test-runner/__tests__/test-runner.service.ee.test.ts
@@ -3,6 +3,7 @@ import type { TestCaseExecutionRepository } from '@n8n/db';
 import type { TestRunRepository } from '@n8n/db';
 import type { WorkflowRepository } from '@n8n/db';
 import { readFileSync } from 'fs';
+import type { Mock } from 'jest-mock';
 import { mock } from 'jest-mock-extended';
 import type { ErrorReporter } from 'n8n-core';
 import { EVALUATION_NODE_TYPE, EVALUATION_TRIGGER_NODE_TYPE } from 'n8n-workflow';
@@ -11,6 +12,7 @@ import type { IRun, ExecutionError } from 'n8n-workflow';
 import path from 'path';
 
 import type { ActiveExecutions } from '@/active-executions';
+import config from '@/config';
 import { TestRunError } from '@/evaluation.ee/test-runner/errors.ee';
 import { LoadNodesAndCredentials } from '@/load-nodes-and-credentials';
 import type { Telemetry } from '@/telemetry';
@@ -679,6 +681,92 @@ describe('TestRunnerService', () => {
 			abortController.signal.addEventListener = originalAddEventListener;
 		}
 	});
+
+	describe('runTestCase - Queue Mode', () => {
+		beforeEach(() => {
+			// Mock config to return 'queue' mode
+			jest.spyOn(config, 'getEnv').mockImplementation((key) => {
+				if (key === 'executions.mode') {
+					return 'queue';
+				}
+				return undefined;
+			});
+		});
+
+		afterEach(() => {
+			(config.getEnv as unknown as Mock).mockRestore();
+		});
+
+		test('should call workflowRunner.run with correct data in queue mode', async () => {
+			// Setup test data
+			const triggerNodeName = 'TriggerNode';
+			const workflow = mock({
+				nodes: [
+					{
+						id: 'node1',
+						name: triggerNodeName,
+						type: EVALUATION_TRIGGER_NODE_TYPE,
+						typeVersion: 1,
+						position: [0, 0],
+						parameters: {},
+					},
+				],
+				connections: {},
+			});
+
+			const metadata = {
+				testRunId: 'test-run-id',
+				userId: 'user-id',
+			};
+
+			const testCase = { json: { id: 1, name: 'Test 1' } };
+			const abortController = new AbortController();
+
+			// Call the method
+			await (testRunnerService as any).runTestCase(
+				workflow,
+				metadata,
+				testCase,
+				abortController.signal,
+			);
+
+			// Verify workflowRunner.run was called with the correct data
+			expect(workflowRunner.run).toHaveBeenCalledTimes(1);
+
+			const runCallArg = workflowRunner.run.mock.calls[0][0];
+
+			// Verify the expected structure for queue mode
+			expect(runCallArg).toEqual(
+				expect.objectContaining({
+					executionMode: 'evaluation',
+					pinData: {
+						[triggerNodeName]: [testCase],
+					},
+					workflowData: workflow,
+					userId: metadata.userId,
+					partialExecutionVersion: 2,
+					triggerToStartFrom: {
+						name: triggerNodeName,
+					},
+					executionData: {
+						resultData: {
+							pinData: {
+								[triggerNodeName]: [testCase],
+							},
+							runData: {},
+						},
+						manualData: {
+							userId: metadata.userId,
+							partialExecutionVersion: 2,
+							triggerToStartFrom: {
+								name: triggerNodeName,
+							},
+						},
+					},
+				}),
+			);
+		});
+	});
 });
 
 describe('validateSetMetricsNodes', () => {
diff --git a/packages/cli/src/evaluation.ee/test-runner/test-runner.service.ee.ts b/packages/cli/src/evaluation.ee/test-runner/test-runner.service.ee.ts
index f4132993ce..affc228349 100644
--- a/packages/cli/src/evaluation.ee/test-runner/test-runner.service.ee.ts
+++ b/packages/cli/src/evaluation.ee/test-runner/test-runner.service.ee.ts
@@ -201,11 +201,8 @@ export class TestRunnerService {
 		// the same way as it would be passed in manual mode
 		if (config.getEnv('executions.mode') === 'queue') {
 			data.executionData = {
-				startData: {
-					// startNodes: startNodesData.startNodes,
-				},
 				resultData: {
-					// pinData,
+					pinData,
 					runData: {},
 				},
 				manualData: {
diff --git a/packages/cli/src/manual-execution.service.ts b/packages/cli/src/manual-execution.service.ts
index 7a3df1ce17..a838fddf5f 100644
--- a/packages/cli/src/manual-execution.service.ts
+++ b/packages/cli/src/manual-execution.service.ts
@@ -113,7 +113,8 @@ export class ManualExecutionService {
 			return workflowExecute.processRunExecutionData(workflow);
 		} else if (
 			data.runData === undefined ||
-			(data.partialExecutionVersion !== 2 && (!data.startNodes || data.startNodes.length === 0))
+			(data.partialExecutionVersion !== 2 && (!data.startNodes || data.startNodes.length === 0)) ||
+			data.executionMode === 'evaluation'
 		) {
 			// Full Execution
 			// TODO: When the old partial execution logic is removed this block can
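For context, a minimal TypeScript sketch of the routing rule that the manual-execution.service.ts hunk introduces, so the expectations in the tests above are easier to follow. The helper name shouldRunFullExecution and the trimmed-down data shape are illustrative assumptions, not part of the patch; only the condition body mirrors the diff.

// Illustrative sketch only, not part of the patch; the real check lives inside
// ManualExecutionService.runManually, and the types here are simplified stand-ins.
function shouldRunFullExecution(data: {
	executionMode: string;
	runData?: Record<string, unknown>;
	startNodes?: Array<{ name: string }>;
	partialExecutionVersion?: number;
}): boolean {
	// A full execution runs when there is no run data, when the legacy
	// partial-execution path has no start nodes, or (new in this patch)
	// when the execution mode is 'evaluation'.
	return (
		data.runData === undefined ||
		(data.partialExecutionVersion !== 2 && (!data.startNodes || data.startNodes.length === 0)) ||
		data.executionMode === 'evaluation'
	);
}

// An evaluation run takes the full-execution branch even when run data and the
// new partial-execution version are present, which is what the queue-mode test checks.
shouldRunFullExecution({ executionMode: 'evaluation', runData: {}, partialExecutionVersion: 2 }); // true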