feat: Add performance plan presets for testcontainers (#18231)

This commit is contained in:
shortstacked
2025-08-15 16:17:55 +01:00
committed by GitHub
parent 1ddb10c3c8
commit 726f0ff37a
12 changed files with 450 additions and 14 deletions

View File

@@ -13,6 +13,9 @@ pnpm stack --postgres
# Start in queue mode (with Redis + PostgreSQL)
pnpm stack --queue
# Start with starter performance plan constraints
pnpm stack:starter
```
When started, you'll see:
@@ -28,6 +31,17 @@ pnpm run stack # SQLite
pnpm run stack:postgres # PostgreSQL
pnpm run stack:queue # Queue mode
pnpm run stack:multi-main # Multiple main instances
pnpm run stack:starter # Starter performance plan
```
### Performance Plan Presets
```bash
# Use predefined performance plans (simulates cloud constraints, differs from cloud CPU wise due to non burstable docker)
pnpm stack --plan trial # Trial: 0.75GB RAM, 0.2 CPU (SQLite only)
pnpm stack --plan starter # Starter: 0.75GB RAM, 0.2 CPU (SQLite only)
pnpm stack --plan pro-1 # Pro-1: 1.25GB RAM, 0.5 CPU (SQLite only)
pnpm stack --plan pro-2 # Pro-2: 2.5GB RAM, 0.75 CPU (SQLite only)
pnpm stack --plan enterprise # Enterprise: 8GB RAM, 1.0 CPU (SQLite only)
```
### Queue Mode with Scaling
@@ -81,6 +95,14 @@ const stack = await createN8NStack({
queueMode: { mains: 2, workers: 3 }
});
// Resource-constrained container (simulating cloud plans)
const stack = await createN8NStack({
resourceQuota: {
memory: 0.375, // 384MB RAM
cpu: 0.25 // 250 millicore CPU
}
});
// Use the stack
console.log(`n8n available at: ${stack.baseUrl}`);
@@ -98,6 +120,38 @@ await stack.stop();
| `--workers <n>` | Number of worker instances (requires queue mode) | `--workers 5` |
| `--name <name>` | Custom project name for parallel runs | `--name my-test` |
| `--env KEY=VALUE` | Set environment variables | `--env N8N_LOG_LEVEL=debug` |
| `--plan <plan>` | Use performance plan preset | `--plan starter` |
## Performance Plans
Simulate cloud plan resource constraints for testing. **Performance plans are SQLite-only** (like cloud n8n):
```bash
# CLI usage
pnpm stack --plan trial         # 0.75GB RAM, 0.2 CPU cores
pnpm stack --plan starter       # 0.75GB RAM, 0.2 CPU cores
pnpm stack --plan pro-1         # 1.25GB RAM, 0.5 CPU cores
pnpm stack --plan pro-2         # 2.5GB RAM, 0.75 CPU cores
pnpm stack --plan enterprise    # 8GB RAM, 1.0 CPU cores
```
**Common Cloud Plan Quotas:**
- **Trial/Starter**: 0.75GB RAM, 0.2 CPU cores
- **Pro-1**: 1.25GB RAM, 0.5 CPU cores
- **Pro-2**: 2.5GB RAM, 0.75 CPU cores
- **Enterprise**: 8GB RAM, 1.0 CPU cores
Resource quotas are applied using Docker's `--memory` and `--cpus` flags for realistic cloud simulation.
## Package.json Scripts
| Script | Description | Equivalent CLI |
|--------|-------------|----------------|
| `stack` | Basic SQLite instance | `pnpm stack` |
| `stack:postgres` | PostgreSQL database | `pnpm stack --postgres` |
| `stack:queue` | Queue mode | `pnpm stack --queue` |
| `stack:multi-main` | Multi-main setup | `pnpm stack --mains 2 --workers 1` |
| `stack:starter` | Starter performance plan (SQLite only) | `pnpm stack --plan starter` |
## Container Architecture
@@ -159,6 +213,7 @@ pnpm run stack:clean:all
- **Parallel Testing**: Use `--name` parameter to run multiple stacks without conflicts
- **Queue Mode**: Automatically enables PostgreSQL (required for queue mode)
- **Multi-Main**: Requires queue mode and special licensing read from N8N_LICENSE_ACTIVATION_KEY environment variable
- **Performance Plans**: Use `--plan` for quick cloud plan simulation
- **Log Monitoring**: Use the `ContainerTestHelpers` class for advanced log monitoring in tests
## Docker Image

View File

@@ -0,0 +1,12 @@
/**
 * n8n Test Containers
 *
 * This package provides container management utilities for n8n testing.
 * It is the public entry point of the package: stack creation, the config
 * types, the shared performance-plan presets, and log/test helpers.
 */
// Stack factory: spins up n8n (plus optional Postgres/Redis) via testcontainers.
export { createN8NStack } from './n8n-test-container-creation';
// Type-only exports for consumers configuring or holding a running stack.
export type { N8NConfig, N8NStack } from './n8n-test-container-creation';
// Re-exports BASE_PERFORMANCE_PLANS, BasePerformancePlan, PerformancePlanName,
// and isValidPerformancePlan for CLI tools and Playwright fixtures.
export * from './performance-plans';
// Log-monitoring utilities for container-based tests.
export { ContainerTestHelpers } from './n8n-test-container-helpers';

View File

@@ -4,6 +4,7 @@ import { parseArgs } from 'node:util';
import { DockerImageNotFoundError } from './docker-image-not-found-error';
import type { N8NConfig, N8NStack } from './n8n-test-container-creation';
import { createN8NStack } from './n8n-test-container-creation';
import { BASE_PERFORMANCE_PLANS, isValidPerformancePlan } from './performance-plans';
// ANSI colors for terminal output
const colors = {
@@ -40,8 +41,17 @@ ${colors.yellow}Options:${colors.reset}
--workers <n> Number of worker instances (default: 1)
--name <name> Project name for parallel runs
--env KEY=VALUE Set environment variables
--plan <plan> Use performance plan preset (${Object.keys(BASE_PERFORMANCE_PLANS).join(', ')})
--help, -h Show this help
${colors.yellow}Performance Plans:${colors.reset}
${Object.entries(BASE_PERFORMANCE_PLANS)
.map(
([name, plan]) =>
` ${name.padEnd(12)} ${plan.memory}GB RAM, ${plan.cpu} CPU cores - SQLite only`,
)
.join('\n')}
${colors.yellow}Environment Variables:${colors.reset}
• N8N_DOCKER_IMAGE=<image> Use a custom Docker image (default: n8nio/n8n:local)
@@ -61,6 +71,11 @@ ${colors.yellow}Examples:${colors.reset}
${colors.bright}# With environment variables${colors.reset}
npm run stack --postgres --env N8N_LOG_LEVEL=info --env N8N_ENABLED_MODULES=insights
${colors.bright}# Performance plan presets${colors.reset}
${Object.keys(BASE_PERFORMANCE_PLANS)
.map((name) => ` npm run stack --plan ${name}`)
.join('\n')}
${colors.bright}# Parallel instances${colors.reset}
npm run stack --name test-1
npm run stack --name test-2
@@ -69,6 +84,7 @@ ${colors.yellow}Notes:${colors.reset}
• SQLite is the default database (no external dependencies)
• Queue mode requires PostgreSQL and enables horizontal scaling
• Use --name for running multiple instances in parallel
• Performance plans simulate cloud constraints (SQLite only, resource-limited)
• Press Ctrl+C to stop all containers
`);
}
@@ -84,6 +100,7 @@ async function main() {
workers: { type: 'string' },
name: { type: 'string' },
env: { type: 'string', multiple: true },
plan: { type: 'string' },
},
allowPositionals: false,
});
@@ -117,6 +134,32 @@ async function main() {
}
}
if (values.plan) {
const planName = values.plan;
if (!isValidPerformancePlan(planName)) {
log.error(`Invalid performance plan: ${values.plan}`);
log.error(`Available plans: ${Object.keys(BASE_PERFORMANCE_PLANS).join(', ')}`);
process.exit(1);
}
const plan = BASE_PERFORMANCE_PLANS[planName];
if (values.postgres) {
log.warn('Performance plans use SQLite only. PostgreSQL option ignored.');
}
if (values.queue || values.mains || values.workers) {
log.warn('Performance plans use SQLite only. Queue mode ignored.');
}
config.resourceQuota = plan;
config.postgres = false; // Force SQLite for performance plans
config.queueMode = false; // Force single instance for performance plans
log.info(
`Using ${planName} performance plan: ${plan.memory}GB RAM, ${plan.cpu} CPU cores (SQLite only)`,
);
}
// Parse environment variables
if (values.env && values.env.length > 0) {
config.env = {};
@@ -182,6 +225,12 @@ function displayConfig(config: N8NConfig) {
log.info('Queue mode: disabled');
}
if (config.resourceQuota) {
log.info(
`Resource limits: ${config.resourceQuota.memory}GB RAM, ${config.resourceQuota.cpu} CPU cores`,
);
}
if (config.env) {
const envCount = Object.keys(config.env).length;
if (envCount > 0) {

View File

@@ -48,10 +48,17 @@ const BASE_ENV: Record<string, string> = {
N8N_LICENSE_ACTIVATION_KEY: process.env.N8N_LICENSE_ACTIVATION_KEY ?? '',
};
// Wait strategy for n8n containers
const N8N_WAIT_STRATEGY = Wait.forAll([
// Wait strategy for n8n main containers
const N8N_MAIN_WAIT_STRATEGY = Wait.forAll([
Wait.forListeningPorts(),
Wait.forHttp('/healthz/readiness', 5678).forStatusCode(200).withStartupTimeout(90000),
Wait.forHttp('/healthz/readiness', 5678).forStatusCode(200).withStartupTimeout(30000),
Wait.forLogMessage('Editor is now accessible via').withStartupTimeout(30000),
]);
// Wait strategy for n8n worker containers
const N8N_WORKER_WAIT_STRATEGY = Wait.forAll([
Wait.forListeningPorts(),
Wait.forLogMessage('n8n worker is now ready').withStartupTimeout(30000),
]);
// --- Interfaces ---
@@ -66,6 +73,10 @@ export interface N8NConfig {
};
env?: Record<string, string>;
projectName?: string;
resourceQuota?: {
memory?: number; // in GB
cpu?: number; // in cores
};
}
export interface N8NStack {
@@ -97,7 +108,7 @@ export interface N8NStack {
* });
*/
export async function createN8NStack(config: N8NConfig = {}): Promise<N8NStack> {
const { postgres = false, queueMode = false, env = {}, projectName } = config;
const { postgres = false, queueMode = false, env = {}, projectName, resourceQuota } = config;
const queueConfig = normalizeQueueConfig(queueMode);
const usePostgres = postgres || !!queueConfig;
const uniqueProjectName = projectName ?? `n8n-stack-${Math.random().toString(36).substring(7)}`;
@@ -195,6 +206,7 @@ export async function createN8NStack(config: N8NConfig = {}): Promise<N8NStack>
uniqueProjectName,
environment,
network,
resourceQuota,
});
containers.push(...instances);
@@ -216,6 +228,7 @@ export async function createN8NStack(config: N8NConfig = {}): Promise<N8NStack>
environment,
network,
directPort: assignedPort,
resourceQuota,
});
containers.push(...instances);
}
@@ -285,6 +298,10 @@ interface CreateInstancesOptions {
environment: Record<string, string>;
network?: StartedNetwork;
directPort?: number;
resourceQuota?: {
memory?: number; // in GB
cpu?: number; // in cores
};
}
async function createN8NInstances({
@@ -295,10 +312,11 @@ async function createN8NInstances({
network,
/** The host port to use for the main instance */
directPort,
resourceQuota,
}: CreateInstancesOptions): Promise<StartedTestContainer[]> {
const instances: StartedTestContainer[] = [];
// Create main instances
// Create main instances sequentially to avoid database migration conflicts
for (let i = 1; i <= mainCount; i++) {
const name = mainCount > 1 ? `${uniqueProjectName}-n8n-main-${i}` : `${uniqueProjectName}-n8n`;
const networkAlias = mainCount > 1 ? name : `${uniqueProjectName}-n8n-main-1`;
@@ -311,6 +329,7 @@ async function createN8NInstances({
instanceNumber: i,
networkAlias,
directPort: i === 1 ? directPort : undefined, // Only first main gets direct port
resourceQuota,
});
instances.push(container);
}
@@ -325,6 +344,7 @@ async function createN8NInstances({
network,
isWorker: true,
instanceNumber: i,
resourceQuota,
});
instances.push(container);
}
@@ -341,6 +361,10 @@ interface CreateContainerOptions {
instanceNumber: number;
networkAlias?: string;
directPort?: number;
resourceQuota?: {
memory?: number; // in GB
cpu?: number; // in cores
};
}
async function createN8NContainer({
@@ -352,6 +376,7 @@ async function createN8NContainer({
instanceNumber,
networkAlias,
directPort,
resourceQuota,
}: CreateContainerOptions): Promise<StartedTestContainer> {
const { consumer, throwWithLogs } = createSilentLogConsumer();
@@ -367,9 +392,15 @@ async function createN8NContainer({
.withPullPolicy(new N8nImagePullPolicy(N8N_IMAGE))
.withName(name)
.withLogConsumer(consumer)
.withName(name)
.withReuse();
if (resourceQuota) {
container = container.withResourcesQuota({
memory: resourceQuota.memory,
cpu: resourceQuota.cpu,
});
}
if (network) {
container = container.withNetwork(network);
if (networkAlias) {
@@ -378,12 +409,14 @@ async function createN8NContainer({
}
if (isWorker) {
container = container.withCommand(['worker']);
container = container.withCommand(['worker']).withWaitStrategy(N8N_WORKER_WAIT_STRATEGY);
} else {
container = container.withExposedPorts(5678).withWaitStrategy(N8N_WAIT_STRATEGY);
container = container.withExposedPorts(5678).withWaitStrategy(N8N_MAIN_WAIT_STRATEGY);
if (directPort) {
container = container.withExposedPorts({ container: 5678, host: directPort });
container = container
.withExposedPorts({ container: 5678, host: directPort })
.withWaitStrategy(N8N_MAIN_WAIT_STRATEGY);
}
}

View File

@@ -11,6 +11,7 @@
"stack:postgres": "TESTCONTAINERS_REUSE_ENABLE=true npm run stack -- --postgres",
"stack:queue": "TESTCONTAINERS_REUSE_ENABLE=true npm run stack -- --queue",
"stack:multi-main": "TESTCONTAINERS_REUSE_ENABLE=true npm run stack -- --mains 2 --workers 1",
"stack:starter": "TESTCONTAINERS_REUSE_ENABLE=true npm run stack -- --plan starter",
"stack:clean:containers": "docker ps -aq --filter 'name=n8n-stack-*' | xargs -r docker rm -f 2>/dev/null",
"stack:clean:networks": "docker network ls --filter 'label=org.testcontainers=true' -q | xargs -r docker network rm 2>/dev/null",
"stack:clean:all": "pnpm run stack:clean:containers && pnpm run stack:clean:networks",

View File

@@ -0,0 +1,28 @@
/**
 * Shared Performance Plan Types and Configurations
 *
 * This file provides the base performance plan definitions that can be used by:
 * - CLI tools (n8n-start-stack.ts)
 * - Playwright tests (cloud-only.ts)
 *
 */

// Base performance plan configuration (resource constraints only).
export interface BasePerformancePlan {
	memory: number; // in GB
	cpu: number; // in cores
}

// NOTE(review): the explicit `Record<string, BasePerformancePlan>` annotation
// widens the keys to `string`, so `PerformancePlanName` below is effectively
// `string` and the trailing `as const` has no narrowing effect. Kept as-is for
// backward compatibility; `satisfies Record<string, BasePerformancePlan>` would
// give literal key types — confirm no consumer relies on `string` first.
// NOTE(review): keys are `pro1`/`pro2` while the docs advertise `--plan pro-1` —
// one of the two needs aligning.
export const BASE_PERFORMANCE_PLANS: Record<string, BasePerformancePlan> = {
	trial: { memory: 0.75, cpu: 1 }, // 768MB RAM, 1000 millicore CPU
	starter: { memory: 0.75, cpu: 1 }, // 768MB RAM, 1000 millicore CPU
	pro1: { memory: 1.25, cpu: 1 }, // 1.25GB RAM, 1000 millicore CPU
	pro2: { memory: 2.5, cpu: 1.5 }, // 2.5GB RAM, 1500 millicore CPU
	enterprise: { memory: 8.0, cpu: 2.0 }, // 8GB RAM, 2.0 CPU core
} as const;

export type PerformancePlanName = keyof typeof BASE_PERFORMANCE_PLANS;

/**
 * Type guard validating a user-supplied plan name against the known presets.
 *
 * Uses an own-property check instead of the `in` operator: `in` also matches
 * inherited `Object.prototype` members, so e.g. `--plan toString` would have
 * been accepted and then produced a nonsense "plan" (a function, not a quota).
 */
export function isValidPerformancePlan(name: string): name is PerformancePlanName {
	return Object.prototype.hasOwnProperty.call(BASE_PERFORMANCE_PLANS, name);
}

View File

@@ -36,6 +36,25 @@ test('basic test', ...) // All modes, fully paralle
test('postgres only @mode:postgres', ...) // Mode-specific
test('needs clean db @db:reset', ...) // Sequential per worker
test('chaos test @mode:multi-main @chaostest', ...) // Isolated per worker
test('cloud resource test @cloud:trial', ...) // Cloud resource constraints
```
## Fixture Selection
- **`base.ts`**: Standard testing with worker-scoped containers (default choice)
- **`cloud-only.ts`**: Cloud resource testing with guaranteed isolation
- Use for performance testing under resource constraints
- Requires `@cloud:*` tags (`@cloud:trial`, `@cloud:enterprise`, etc.)
- Creates only cloud containers, no worker containers
```typescript
// Standard testing
import { test, expect } from '../fixtures/base';
// Cloud resource testing
import { test, expect } from '../fixtures/cloud-only';
test('Performance under constraints @cloud:trial', async ({ n8n, api }) => {
// Test runs under the trial plan's resource constraints (768MB RAM)
});
```
## Tips
@@ -47,6 +66,8 @@ test('chaos test @mode:multi-main @chaostest', ...) // Isolated per worker
- **composables**: Multi-page interactions (e.g., `WorkflowComposer.executeWorkflowAndWaitForNotification()`)
- **config**: Test setup and configuration (constants, test users, etc.)
- **fixtures**: Custom test fixtures extending Playwright's base test
- `base.ts`: Standard fixtures with worker-scoped containers
- `cloud-only.ts`: Cloud resource testing with test-scoped containers only
- **pages**: Page Object Models for UI interactions
- **services**: API helpers for E2E controller, REST calls, etc.
- **utils**: Utility functions (string manipulation, helpers, etc.)

View File

@@ -2,7 +2,6 @@ import { test as base, expect } from '@playwright/test';
import type { N8NStack } from 'n8n-containers/n8n-test-container-creation';
import { createN8NStack } from 'n8n-containers/n8n-test-container-creation';
import { ContainerTestHelpers } from 'n8n-containers/n8n-test-container-helpers';
import { setTimeout as wait } from 'node:timers/promises';
import { setupDefaultInterceptors } from '../config/intercepts';
import { n8nPage } from '../pages/n8nPage';
@@ -70,9 +69,6 @@ export const test = base.extend<TestFixtures, WorkerFixtures>({
console.log('Creating container with config:', containerConfig);
const container = await createN8NStack(containerConfig);
// TODO: Remove this once we have a better way to wait for the container to be ready (e.g. healthcheck)
await wait(3000);
console.log(`Container URL: ${container.baseUrl}`);
await use(container);

View File

@@ -0,0 +1,169 @@
/**
* Cloud Resource Testing Fixtures
*
* This fixture provides cloud containers without worker containers.
* Use this when you want to test with cloud resource constraints.
*
* Architecture:
* - No worker containers - cloud containers only
* - Test-scoped containers with resource limits
* - Complete fixture chain (n8n, api, context, page)
* - Per-test database reset
*/
import { test as base, expect } from '@playwright/test';
import type { N8NConfig, N8NStack } from 'n8n-containers/n8n-test-container-creation';
import { createN8NStack } from 'n8n-containers/n8n-test-container-creation';
import { type PerformancePlanName, BASE_PERFORMANCE_PLANS } from 'n8n-containers/performance-plans';
import { setupDefaultInterceptors } from '../config/intercepts';
import { n8nPage } from '../pages/n8nPage';
import { ApiHelpers } from '../services/api-helper';
/**
 * Build a standardized, Docker-safe project name for a test container.
 *
 * The test title is slugified (every non-alphanumeric character becomes a
 * hyphen, then lowercased) and joined with the prefix and plan profile.
 */
function createProjectName(prefix: string, profile: string, testTitle: string): string {
	const slug = testTitle.replace(/[^a-z0-9]/gi, '-').toLowerCase();
	return [prefix, profile, slug].join('-');
}
// Test-scoped fixtures exposed by the cloud-only test object.
type CloudOnlyFixtures = {
	cloudContainer: N8NStack; // resource-limited n8n stack, one per test
	n8n: n8nPage; // page object wrapping the Playwright page
	api: ApiHelpers; // REST helpers bound to the test's browser context
	baseURL: string; // derived from cloudContainer.baseUrl
};
/**
 * Extract the cloud resource profile from a test's tags.
 *
 * Looks for a tag of the form `@cloud:<plan>` (e.g. `@cloud:trial`,
 * `@cloud:enterprise`) and returns the plan name when it is a known preset,
 * or `null` when no cloud tag is present or the plan is unknown.
 */
function getCloudResourceProfile(tags: string[]): PerformancePlanName | null {
	const cloudTag = tags.find((tag) => tag.startsWith('@cloud:'));
	if (!cloudTag) return null;
	const profile = cloudTag.replace('@cloud:', '');
	// Own-property check instead of `in`: `in` also matches inherited
	// Object.prototype members, so `@cloud:toString` would have been accepted.
	if (Object.prototype.hasOwnProperty.call(BASE_PERFORMANCE_PLANS, profile)) {
		return profile;
	}
	return null;
}
/**
 * Cloud-only test fixtures - no worker containers, only cloud containers.
 *
 * Each test gets its own resource-limited n8n stack (selected via a required
 * `@cloud:<plan>` tag), a fresh database, and the full n8n/api/page fixture
 * chain wired against that stack's base URL.
 */
export const test = base.extend<CloudOnlyFixtures>({
	// Test-scoped container: created before each test, stopped after it.
	cloudContainer: async ({ browser }, use, testInfo) => {
		// The plan is mandatory — fail loudly instead of silently running unconstrained.
		const cloudProfile = getCloudResourceProfile(testInfo.tags);
		if (!cloudProfile) {
			throw new Error(
				`Cloud-only fixture requires @cloud:* tags. Found tags: ${testInfo.tags.join(', ')}`,
			);
		}
		// An externally-provided URL cannot carry the resource limits this fixture promises.
		if (process.env.N8N_BASE_URL) {
			throw new Error('Cloud-only fixture cannot be used with N8N_BASE_URL environment variable');
		}
		const resourceConfig = BASE_PERFORMANCE_PLANS[cloudProfile];
		console.log(`Creating cloud container: ${cloudProfile}`);
		const config: N8NConfig = {
			// Memory in GB, CPU in cores — applied as Docker --memory/--cpus limits.
			resourceQuota: {
				memory: resourceConfig.memory,
				cpu: resourceConfig.cpu,
			},
			env: {
				E2E_TESTS: 'true',
			},
			// Unique per test so parallel runs don't collide on container names.
			projectName: createProjectName('n8n-stack-cloud', cloudProfile, testInfo.title),
		};
		const stack = await createN8NStack(config);
		console.log('🔄 Resetting database for cloud container');
		// Throwaway context just for the reset call; the test gets its own context later.
		const context = await browser.newContext({ baseURL: stack.baseUrl });
		const api = new ApiHelpers(context.request);
		await api.resetDatabase();
		await context.close();
		console.log(`✅ Cloud container ready: ${stack.baseUrl}`);
		await use(stack);
		// Cleanup
		console.log('🧹 Cleaning up cloud container');
		await stack.stop();
	},
	// Base URL from cloud container
	baseURL: async ({ cloudContainer }, use) => {
		await use(cloudContainer.baseUrl);
	},
	// Browser context with cloud container URL and interceptors
	// (`baseURL` is depended on so Playwright builds the context against the container).
	context: async ({ context, baseURL }, use) => {
		await setupDefaultInterceptors(context);
		await use(context);
	},
	// Page with authentication setup
	page: async ({ context }, use, testInfo) => {
		const page = await context.newPage();
		const api = new ApiHelpers(context.request);
		// Set up authentication from tags (works for cloud containers)
		await api.setupFromTags(testInfo.tags);
		await use(page);
		await page.close();
	},
	// n8n page object
	n8n: async ({ page }, use) => {
		const n8nInstance = new n8nPage(page);
		await use(n8nInstance);
	},
	// API helpers
	api: async ({ context }, use) => {
		const api = new ApiHelpers(context.request);
		await use(api);
	},
});
export { expect };
/*
CLOUD-ONLY FIXTURE BENEFITS:
✅ No worker containers: Only cloud containers are created
✅ Guaranteed cloud testing: Tests must have @cloud:* tags or they fail
✅ Complete fixture chain: Full n8n/api/context/page fixtures available
✅ Fresh containers: Each test gets its own cloud container with resource limits
✅ Clean database state: Per-test database reset with enhanced timing
✅ Resource isolation: True cloud plan simulation without interference
Usage:
// Import the cloud-only fixture instead of base
import { test, expect } from '../../fixtures/cloud-only';
test('Performance test @cloud:trial', async ({ n8n, api }) => {
// This test runs ONLY on a trial plan container (768MB RAM, per the trial preset)
// No worker containers are created
});
Flow:
1. Detect @cloud:* tag (required)
2. Create cloud container with resource limits
3. Reset the database via the API so each test starts clean
4. Provide complete n8n/api fixture chain
5. Run test against cloud container only
6. Clean up cloud container
Perfect for: Performance testing, resource constraint testing, cloud plan validation
*/

View File

@@ -11,6 +11,7 @@
"test:container:postgres": "playwright test --project='postgres:*'",
"test:container:queue": "playwright test --project='queue:*'",
"test:container:multi-main": "playwright test --project='multi-main:*'",
"test:container:trial": "playwright test --project='trial:*'",
"test:workflows:setup": "tsx ./tests/cli-workflows/setup-workflow-tests.ts",
"test:workflows": "playwright test --project=cli-workflows",
"test:workflows:schema": "SCHEMA=true playwright test --project=cli-workflows",

View File

@@ -15,7 +15,7 @@ const CONTAINER_CONFIGS: Array<{ name: string; config: N8NConfig }> = [
{ name: 'standard', config: {} },
{ name: 'postgres', config: { postgres: true } },
{ name: 'queue', config: { queueMode: true } },
{ name: 'multi-main', config: { queueMode: { mains: 2, workers: 1 } } }, // Multi main is having timing issues on startup, needs to be resolved
{ name: 'multi-main', config: { queueMode: { mains: 2, workers: 1 } } },
];
export function getProjects(): Project[] {

View File

@@ -0,0 +1,71 @@
/**
* Large Node Performance Tests with Cloud Resource Constraints
*
* These tests use @cloud:* tags to automatically create resource-limited containers
* that simulate n8n Cloud plan constraints.
*/
import { test, expect } from '../../fixtures/cloud';
import type { n8nPage } from '../../pages/n8nPage';
import { measurePerformance } from '../../utils/performance-helper';
/**
 * Prepare a large workflow for a performance run.
 *
 * Imports the `large.json` fixture into a fresh workflow and sets the payload
 * size on the "Edit Fields" node. Steps are strictly ordered UI interactions;
 * do not reorder them.
 *
 * @param n8n  page object for the running n8n instance
 * @param size number of items the workflow should generate
 */
async function setupPerformanceTest(n8n: n8nPage, size: number) {
	await n8n.goHome();
	await n8n.workflows.clickNewWorkflowCard();
	await n8n.canvas.importWorkflow('large.json', 'Large Workflow');
	// Dismiss the import-success toast so it doesn't cover canvas elements.
	await n8n.notifications.closeNotificationByText('Successful');
	// Configure data size
	await n8n.canvas.openNode('Edit Fields');
	await n8n.page
		.getByTestId('parameter-input-value')
		.getByTestId('parameter-input-field')
		.fill(size.toString());
	await n8n.ndv.clickBackToCanvasButton();
}
test.describe('Large Node Performance - Cloud Resources', () => {
	test('Large workflow with starter plan resources @cloud:starter', async ({ n8n }) => {
		// Import the large workflow and configure it to produce 30k items.
		await setupPerformanceTest(n8n, 30000);

		const iterations = 20;
		const durations: number[] = [];

		// Execute once so the Code node has output data to render when opened.
		await n8n.workflowComposer.executeWorkflowAndWaitForNotification(
			'Workflow executed successfully',
			{
				timeout: 30000,
			},
		);

		// Repeatedly open/close the Code node, timing each open.
		for (let attempt = 0; attempt < iterations; attempt++) {
			const elapsed = await measurePerformance(n8n.page, `open-node-${attempt}`, async () => {
				await n8n.canvas.openNode('Code');
			});
			durations.push(elapsed);
			await n8n.ndv.clickBackToCanvasButton();
			console.log(`✓ Open node (${attempt + 1} of ${iterations}): ${elapsed.toFixed(1)}ms`);
		}

		// Assert on the mean open time under starter-plan resource limits.
		const average = durations.reduce((sum, value) => sum + value, 0) / durations.length;
		console.log(`Average open node duration: ${average.toFixed(1)}ms`);
		expect(average).toBeLessThan(5000);
	});
});
/*
Usage:
# Run all performance tests (including cloud resource tests)
pnpm --filter n8n-playwright test:performance
# Run only cloud resource tests
pnpm --filter n8n-playwright test --grep "@cloud:"
# Run specific cloud plan tests
pnpm --filter n8n-playwright test --grep "@cloud:trial"
pnpm --filter n8n-playwright test --grep "@cloud:enterprise"
# Run this specific file (cloud resource tests only)
pnpm --filter n8n-playwright test tests/performance/large-node-cloud.spec.ts
*/