test: Refactor and clean up perf tests (#19324)

Declan Carroll
2025-09-15 07:48:33 +01:00
committed by GitHub
parent 18d91b614b
commit e4ccf527f2
2 changed files with 24 additions and 183 deletions


@@ -1,41 +1,34 @@
/**
* Large Node Performance Tests with Cloud Resource Constraints
*
* These tests use @cloud-* tags to automatically create resource-limited containers
* that simulate n8n Cloud plan constraints.
*/
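// A hypothetical illustration (not part of this commit) of how the @cloud:* tag
// convention could map to container resource limits. The actual mapping lives in
// the fixtures/cloud implementation and may differ; the values below are invented.
//
//   const PLAN_LIMITS: Record<string, { cpus: number; memoryMb: number }> = {
//     starter: { cpus: 0.5, memoryMb: 512 },
//     enterprise: { cpus: 2, memoryMb: 4096 },
//   };
//
//   // A fixture could read the '@cloud:<plan>' tag from the test title, look up
//   // PLAN_LIMITS[plan], and start the n8n container with those limits.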
import { test, expect } from '../../fixtures/cloud';
import type { n8nPage } from '../../pages/n8nPage';
import { measurePerformance, attachMetric } from '../../utils/performance-helper';
async function setupPerformanceTest(n8n: n8nPage, size: number) {
await n8n.goHome();
await n8n.workflows.clickNewWorkflowCard();
await n8n.canvas.importWorkflow('large.json', 'Large Workflow');
await n8n.start.fromImportedWorkflow('large.json');
await n8n.notifications.closeNotificationByText('Successful');
// Configure data size
await n8n.canvas.openNode('Edit Fields');
await n8n.page
.getByTestId('parameter-input-value')
.getByTestId('parameter-input-field')
.fill(size.toString());
await n8n.ndv.fillParameterInputByName('value', size.toString());
await n8n.ndv.clickBackToCanvasButton();
}
test.describe('Large Node Performance - Cloud Resources', () => {
test('Large workflow with starter plan resources @cloud:starter', async ({ n8n }, testInfo) => {
await setupPerformanceTest(n8n, 30000);
const loopSize = 20;
test.describe('Large Data Size Performance - Cloud Resources', () => {
test('Code Node with 30000 items @cloud:starter', async ({ n8n }, testInfo) => {
const itemCount = 30000;
await setupPerformanceTest(n8n, itemCount);
const workflowExecuteBudget = 10_000;
const openNodeBudget = 600;
const loopSize = 30;
const stats = [];
await n8n.workflowComposer.executeWorkflowAndWaitForNotification(
'Workflow executed successfully',
{
timeout: 30000,
},
);
const triggerDuration = await measurePerformance(n8n.page, 'trigger-workflow', async () => {
await n8n.workflowComposer.executeWorkflowAndWaitForNotification(
'Workflow executed successfully',
{
// Add a buffer; we still assert at the end that the duration stays under the budget
timeout: workflowExecuteBudget + 5000,
},
);
});
for (let i = 0; i < loopSize; i++) {
const openNodeDuration = await measurePerformance(n8n.page, `open-node-${i}`, async () => {
@@ -44,32 +37,15 @@ test.describe('Large Node Performance - Cloud Resources', () => {
stats.push(openNodeDuration);
await n8n.ndv.clickBackToCanvasButton();
console.log(`✓ Open node (${i + 1} of ${loopSize}): ${openNodeDuration.toFixed(1)}ms`);
}
const average = stats.reduce((a, b) => a + b, 0) / stats.length;
console.log(`Average open node duration: ${average.toFixed(1)}ms`);
// Attach performance metric using helper method
await attachMetric(testInfo, 'open-node-30000', average, 'ms');
await attachMetric(testInfo, `open-node-${itemCount}`, average, 'ms');
await attachMetric(testInfo, `trigger-workflow-${itemCount}`, triggerDuration, 'ms');
expect(average).toBeLessThan(5000);
expect.soft(average, `Open node duration for ${itemCount} items`).toBeLessThan(openNodeBudget);
expect
.soft(triggerDuration, `Trigger workflow duration for ${itemCount} items`)
.toBeLessThan(workflowExecuteBudget);
});
});
/*
Usage:
# Run all performance tests (including cloud resource tests)
pnpm --filter n8n-playwright test:performance
# Run only cloud resource tests
pnpm --filter n8n-playwright test --grep "@cloud:"
# Run specific cloud plan tests
pnpm --filter n8n-playwright test --grep "@cloud:trial"
pnpm --filter n8n-playwright test --grep "@cloud:enterprise"
# Run this specific file (cloud resource tests only)
pnpm --filter n8n-playwright test tests/performance/large-node-cloud.spec.ts
*/
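Both specs lean on utils/performance-helper. The helper implementation is not part of this diff; the following is a minimal sketch, assuming only the signatures implied by the call sites above (measurePerformance returns a duration in milliseconds, attachMetric writes a JSON attachment to the report). The real helpers may instead record browser-side Performance API marks.

import type { Page, TestInfo } from '@playwright/test';

// Runs an async action and returns its wall-clock duration in milliseconds.
export async function measurePerformance(
	page: Page,
	name: string,
	action: () => Promise<void>,
): Promise<number> {
	const start = Date.now();
	await action();
	const duration = Date.now() - start;
	// `page` and `name` mirror the real signature; a fuller helper might use them to
	// record a performance.mark()/measure() pair in the browser under `name`.
	return duration;
}

// Attaches a single named metric to the Playwright report as a JSON attachment.
export async function attachMetric(
	testInfo: TestInfo,
	name: string,
	value: number,
	unit: string,
): Promise<void> {
	await testInfo.attach(name, {
		body: JSON.stringify({ name, value, unit }),
		contentType: 'application/json',
	});
}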


@@ -1,135 +0,0 @@
import { test, expect } from '../../fixtures/base';
import type { n8nPage } from '../../pages/n8nPage';
import {
getAllPerformanceMetrics,
measurePerformance,
attachMetric,
} from '../../utils/performance-helper';
async function setupPerformanceTest(n8n: n8nPage, size: number) {
await n8n.start.fromNewProjectBlankCanvas();
await n8n.canvas.importWorkflow('large.json', 'Large Workflow');
await n8n.notifications.closeNotificationByText('Successful');
// Configure data size
await n8n.canvas.openNode('Edit Fields');
await n8n.page
.getByTestId('parameter-input-value')
.getByTestId('parameter-input-field')
.fill(size.toString());
await n8n.ndv.clickBackToCanvasButton();
}
test.describe('Performance Example: Multiple sets', () => {
const testData = [
{
size: 30000,
timeout: 40000,
budgets: {
triggerWorkflow: 8000, // 8s budget (actual: 6.4s)
openLargeNode: 2500, // 2.5s budget (actual: 1.6s)
},
},
{
size: 60000,
timeout: 60000,
budgets: {
triggerWorkflow: 15000, // 15s budget (actual: 12.4s)
openLargeNode: 6000, // 6s budget (actual: 4.9s)
},
},
];
testData.forEach(({ size, timeout, budgets }) => {
test(`workflow performance - ${size.toLocaleString()} items`, async ({ n8n }) => {
test.setTimeout(timeout);
// Setup workflow
await setupPerformanceTest(n8n, size);
// Measure workflow execution
const triggerDuration = await measurePerformance(n8n.page, 'trigger-workflow', async () => {
await n8n.workflowComposer.executeWorkflowAndWaitForNotification('Successful', {
timeout: budgets.triggerWorkflow + 5000, // Add buffer
});
});
await attachMetric(test.info(), `trigger-workflow-${size}`, triggerDuration, 'ms');
// Assert trigger performance
expect(triggerDuration).toBeLessThan(budgets.triggerWorkflow);
console.log(
`✓ Trigger workflow (${size} items): ${triggerDuration.toFixed(1)}ms < ${budgets.triggerWorkflow}ms`,
);
// Measure node opening
const openNodeDuration = await measurePerformance(n8n.page, 'open-large-node', async () => {
await n8n.canvas.openNode('Code');
});
// Attach performance metric using helper method
await attachMetric(test.info(), `open-large-node-${size}`, openNodeDuration, 'ms');
// Assert node opening performance
expect(openNodeDuration).toBeLessThan(budgets.openLargeNode);
console.log(
`✓ Open node (${size} items): ${openNodeDuration.toFixed(1)}ms < ${budgets.openLargeNode}ms`,
);
// Get all metrics and attach to test report
const allMetrics = await getAllPerformanceMetrics(n8n.page);
console.log(`\nAll performance metrics for ${size.toLocaleString()} items:`, allMetrics);
// Attach metrics to test report
await test.info().attach('performance-metrics', {
body: JSON.stringify(
{
dataSize: size,
metrics: allMetrics,
budgets,
passed: {
triggerWorkflow: triggerDuration < budgets.triggerWorkflow,
openNode: openNodeDuration < budgets.openLargeNode,
},
},
null,
2,
),
contentType: 'application/json',
});
});
});
});
test('Performance Example: Multiple Loops in a single test', async ({ n8n }) => {
await setupPerformanceTest(n8n, 30000);
const loopSize = 20;
const stats = [];
await n8n.workflowComposer.executeWorkflowAndWaitForNotification('Successful');
for (let i = 0; i < loopSize; i++) {
// Measure node opening
const openNodeDuration = await measurePerformance(n8n.page, `open-node-${i}`, async () => {
await n8n.canvas.openNode('Code');
});
stats.push(openNodeDuration);
await n8n.ndv.clickBackToCanvasButton();
console.log(`✓ Open node (${i + 1} of ${loopSize}): ${openNodeDuration.toFixed(1)}ms`);
}
// Get the average of the stats
const average = stats.reduce((a, b) => a + b, 0) / stats.length;
console.log(`Average open node duration: ${average.toFixed(1)}ms`);
expect(average).toBeLessThan(2000);
});
test('Performance Example: Asserting on a performance metric', async ({ n8n }) => {
await setupPerformanceTest(n8n, 30000);
await n8n.workflowComposer.executeWorkflowAndWaitForNotification('Successful');
const openNodeDuration = await measurePerformance(n8n.page, 'open-node', async () => {
await n8n.canvas.openNode('Code');
});
expect(openNodeDuration).toBeLessThan(2000);
});
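The deleted example spec also used getAllPerformanceMetrics to dump every recorded metric into the report. Its implementation is likewise outside this diff; a plausible sketch, assuming metrics are recorded as browser performance measures, is:

import type { Page } from '@playwright/test';

// Collects all performance.measure() entries recorded in the page, keyed by name.
export async function getAllPerformanceMetrics(page: Page): Promise<Record<string, number>> {
	return await page.evaluate(() => {
		const metrics: Record<string, number> = {};
		for (const entry of performance.getEntriesByType('measure')) {
			metrics[entry.name] = entry.duration;
		}
		return metrics;
	});
}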