test: Add custom reporter for test metrics (#18960)

This commit is contained in:
shortstacked
2025-09-01 13:17:14 +01:00
committed by GitHub
parent ec7eddc364
commit 71b33277f8
8 changed files with 256 additions and 8 deletions

View File

@@ -34,6 +34,7 @@
"n8n-core": "workspace:*",
"n8n-workflow": "workspace:*",
"nanoid": "catalog:",
"tsx": "catalog:"
"tsx": "catalog:",
"zod": "catalog:"
}
}

View File

@@ -69,6 +69,7 @@ export default defineConfig({
['html', { open: 'never' }],
['json', { outputFile: 'test-results.json' }],
currentsReporter(currentsConfig),
['./reporters/metrics-reporter.ts'],
]
: [['html']],
: [['html'], ['./reporters/metrics-reporter.ts']],
});

View File

@@ -0,0 +1,66 @@
# Metrics Reporter Usage
Automatically collect performance metrics from Playwright tests and send them to a Webhook.
## Setup
```bash
export QA_PERFORMANCE_METRICS_WEBHOOK_URL=https://your-webhook-endpoint.com/metrics
export QA_PERFORMANCE_METRICS_WEBHOOK_USER=username
export QA_PERFORMANCE_METRICS_WEBHOOK_PASSWORD=password
```
## Attach Metrics in Tests
**Option 1: Helper function (recommended)**
```javascript
import { attachMetric } from '../../utils/performance-helper';
await attachMetric(testInfo, 'memory-usage', 1234567, 'bytes');
```
**Option 2: Direct attach**
```javascript
await testInfo.attach('metric:memory-usage', {
body: JSON.stringify({ value: 1234567, unit: 'bytes' })
});
```
## What Gets Sent to BigQuery
```json
{
"test_name": "My performance test",
"metric_name": "memory-usage",
"metric_value": 1234567,
"metric_unit": "bytes",
"git_commit": "abc123...",
"git_branch": "main",
"timestamp": "2025-08-29T..."
}
```
## Data Pipeline
**Playwright Test** → **n8n Webhook** → **BigQuery Table**
The n8n workflow that processes the metrics is here:
https://internal.users.n8n.cloud/workflow/zSRjEwfBfCNjGXK8
## BigQuery Schema
```json
{
"fields": [
{"name": "test_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "metric_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "metric_value", "type": "FLOAT", "mode": "REQUIRED"},
{"name": "metric_unit", "type": "STRING", "mode": "REQUIRED"},
{"name": "git_commit", "type": "STRING", "mode": "REQUIRED"},
{"name": "git_branch", "type": "STRING", "mode": "REQUIRED"},
{"name": "timestamp", "type": "TIMESTAMP", "mode": "REQUIRED"}
]
}
```
That's it! Metrics are automatically collected and sent when you attach them to tests.

View File

@@ -0,0 +1,147 @@
import type { Reporter, TestCase, TestResult } from '@playwright/test/reporter';
import { strict as assert } from 'assert';
import { execSync } from 'child_process';
import { z } from 'zod';
// Runtime shape of the JSON body a test attaches via
// testInfo.attach('metric:<name>', { body: JSON.stringify({ value, unit }) }).
// Parsed with zod so malformed attachments are rejected (see collectMetrics).
const metricDataSchema = z.object({
	value: z.number(),
	unit: z.string().optional(),
});
// A single performance metric extracted from a test attachment.
interface Metric {
	// Attachment name with the 'metric:' prefix stripped.
	name: string;
	// Numeric measurement value.
	value: number;
	// Unit of measurement (e.g. 'ms', 'bytes'); null when the attachment omitted it.
	unit: string | null;
}
// Constructor options for MetricsReporter. Each field falls back to the
// corresponding QA_PERFORMANCE_METRICS_WEBHOOK_* environment variable when omitted.
interface ReporterOptions {
	webhookUrl?: string;
	webhookUser?: string;
	webhookPassword?: string;
}
/**
 * Automatically collect performance metrics from Playwright tests and send them to a Webhook.
 * If your test contains a testInfo.attach() call with a name starting with 'metric:', the metric
 * will be collected and sent to the Webhook.
 *
 * Requests are fired without blocking onTestEnd and flushed in onEnd, so a slow webhook
 * never serializes test reporting. The reporter never throws: configuration, parsing, git,
 * and network failures are logged and skipped.
 */
class MetricsReporter implements Reporter {
	private webhookUrl: string | undefined;
	private webhookUser: string | undefined;
	private webhookPassword: string | undefined;

	// In-flight webhook requests; settled in onEnd() before the run finishes.
	private pendingRequests: Array<Promise<void>> = [];

	constructor(options: ReporterOptions = {}) {
		this.webhookUrl = options.webhookUrl ?? process.env.QA_PERFORMANCE_METRICS_WEBHOOK_URL;
		this.webhookUser = options.webhookUser ?? process.env.QA_PERFORMANCE_METRICS_WEBHOOK_USER;
		this.webhookPassword =
			options.webhookPassword ?? process.env.QA_PERFORMANCE_METRICS_WEBHOOK_PASSWORD;
	}

	onTestEnd(test: TestCase, result: TestResult): void {
		// Do nothing unless the webhook is fully configured and the test actually ran.
		if (
			!this.webhookUrl ||
			!this.webhookUser ||
			!this.webhookPassword ||
			result.status === 'skipped'
		) {
			return;
		}

		const metrics = this.collectMetrics(result);
		if (metrics.length > 0) {
			// Fire-and-forget: awaiting here would serialize test reporting on webhook
			// latency. onEnd() awaits all pending requests before the run completes.
			this.pendingRequests.push(this.sendMetrics(test, metrics));
		}
	}

	/**
	 * Extract all 'metric:'-prefixed attachments from a test result.
	 * Malformed attachments (bad JSON or wrong shape) are warned about and skipped.
	 */
	private collectMetrics(result: TestResult): Metric[] {
		const metrics: Metric[] = [];
		result.attachments.forEach((attachment) => {
			if (attachment.name.startsWith('metric:')) {
				const metricName = attachment.name.slice('metric:'.length);
				try {
					const parsedData = JSON.parse(attachment.body?.toString() ?? '');
					const data = metricDataSchema.parse(parsedData);
					metrics.push({
						name: metricName,
						value: data.value,
						unit: data.unit ?? null,
					});
				} catch (e) {
					console.warn(
						`[MetricsReporter] Failed to parse metric ${metricName}: ${(e as Error).message}`,
					);
				}
			}
		});
		return metrics;
	}

	/**
	 * POST the collected metrics for one test to the webhook with basic auth.
	 * Failures (missing git info, non-2xx response, network error, 10s timeout)
	 * are logged, never thrown — a reporter must not crash the run.
	 */
	private async sendMetrics(test: TestCase, metrics: Metric[]): Promise<void> {
		const gitInfo = this.getGitInfo();
		// getGitInfo() returns nulls on failure; skip with a warning instead of
		// asserting, so a missing .git directory cannot abort reporting.
		if (!gitInfo.commit || !gitInfo.branch || !gitInfo.author) {
			console.warn(`[MetricsReporter] Missing git info, skipping metrics for: ${test.title}`);
			return;
		}

		const payload = {
			test_name: test.title,
			git_commit: gitInfo.commit,
			git_branch: gitInfo.branch,
			git_author: gitInfo.author,
			timestamp: new Date().toISOString(),
			metrics: metrics.map((metric) => ({
				metric_name: metric.name,
				metric_value: metric.value,
				metric_unit: metric.unit,
			})),
		};

		try {
			const auth = Buffer.from(`${this.webhookUser}:${this.webhookPassword}`).toString('base64');
			const response = await fetch(this.webhookUrl!, {
				method: 'POST',
				headers: {
					'Content-Type': 'application/json',
					Authorization: `Basic ${auth}`,
				},
				body: JSON.stringify(payload),
				// Bound each request so onEnd() cannot hang on a dead endpoint.
				signal: AbortSignal.timeout(10000),
			});
			if (!response.ok) {
				console.warn(`[MetricsReporter] Webhook failed (${response.status}): ${test.title}`);
			}
		} catch (e) {
			console.warn(
				`[MetricsReporter] Failed to send metrics for test ${test.title}: ${(e as Error).message}`,
			);
		}
	}

	async onEnd(): Promise<void> {
		// Flush every fire-and-forget request; allSettled so one failure
		// does not mask the others.
		if (this.pendingRequests.length > 0) {
			await Promise.allSettled(this.pendingRequests);
		}
	}

	/**
	 * Resolve commit hash, branch name and last-commit author via git CLI.
	 * Returns all-null (after logging) when git is unavailable, e.g. outside a repo.
	 */
	private getGitInfo(): { commit: string | null; branch: string | null; author: string | null } {
		try {
			return {
				commit: execSync('git rev-parse HEAD', { encoding: 'utf8' }).trim(),
				branch: execSync('git rev-parse --abbrev-ref HEAD', { encoding: 'utf8' }).trim(),
				author: execSync('git log -1 --pretty=format:"%an"', { encoding: 'utf8' }).trim(),
			};
		} catch (e) {
			console.error(`[MetricsReporter] Failed to get Git info: ${(e as Error).message}`);
			return { commit: null, branch: null, author: null };
		}
	}
}

// eslint-disable-next-line import-x/no-default-export
export default MetricsReporter;

View File

@@ -7,7 +7,7 @@
import { test, expect } from '../../fixtures/cloud';
import type { n8nPage } from '../../pages/n8nPage';
import { measurePerformance } from '../../utils/performance-helper';
import { measurePerformance, attachMetric } from '../../utils/performance-helper';
async function setupPerformanceTest(n8n: n8nPage, size: number) {
await n8n.goHome();
@@ -25,7 +25,7 @@ async function setupPerformanceTest(n8n: n8nPage, size: number) {
}
test.describe('Large Node Performance - Cloud Resources', () => {
test('Large workflow with starter plan resources @cloud:starter', async ({ n8n }) => {
test('Large workflow with starter plan resources @cloud:starter', async ({ n8n }, testInfo) => {
await setupPerformanceTest(n8n, 30000);
const loopSize = 20;
const stats = [];
@@ -49,6 +49,10 @@ test.describe('Large Node Performance - Cloud Resources', () => {
}
const average = stats.reduce((a, b) => a + b, 0) / stats.length;
console.log(`Average open node duration: ${average.toFixed(1)}ms`);
// Attach performance metric using helper method
await attachMetric(testInfo, 'open-node-30000', average, 'ms');
expect(average).toBeLessThan(5000);
});
});

View File

@@ -1,10 +1,13 @@
import { test, expect } from '../../fixtures/base';
import type { n8nPage } from '../../pages/n8nPage';
import { getAllPerformanceMetrics, measurePerformance } from '../../utils/performance-helper';
import {
getAllPerformanceMetrics,
measurePerformance,
attachMetric,
} from '../../utils/performance-helper';
async function setupPerformanceTest(n8n: n8nPage, size: number) {
await n8n.goHome();
await n8n.workflows.clickNewWorkflowCard();
await n8n.start.fromNewProject();
await n8n.canvas.importWorkflow('large.json', 'Large Workflow');
await n8n.notifications.closeNotificationByText('Successful');
@@ -51,6 +54,8 @@ test.describe('Performance Example: Multiple sets}', () => {
});
});
await attachMetric(test.info(), `trigger-workflow-${size}`, triggerDuration, 'ms');
// Assert trigger performance
expect(triggerDuration).toBeLessThan(budgets.triggerWorkflow);
console.log(
@@ -62,6 +67,9 @@ test.describe('Performance Example: Multiple sets}', () => {
await n8n.canvas.openNode('Code');
});
// Attach performance metric using helper method
await attachMetric(test.info(), `open-large-node-${size}`, openNodeDuration, 'ms');
// Assert node opening performance
expect(openNodeDuration).toBeLessThan(budgets.openLargeNode);
console.log(

View File

@@ -1,4 +1,4 @@
import type { Page } from '@playwright/test';
import type { Page, TestInfo } from '@playwright/test';
export async function measurePerformance(
page: Page,
@@ -28,3 +28,21 @@ export async function getAllPerformanceMetrics(page: Page) {
return metrics;
});
}
/**
 * Attach a performance metric for collection by the metrics reporter.
 * @param testInfo - The Playwright TestInfo object
 * @param metricName - Name of the metric (will be prefixed with 'metric:')
 * @param value - The numeric value to track
 * @param unit - The unit of measurement (e.g., 'ms', 'bytes', 'count')
 */
export async function attachMetric(
	testInfo: TestInfo,
	metricName: string,
	value: number,
	unit?: string,
): Promise<void> {
	// The 'metric:' prefix is what the metrics reporter scans attachments for.
	const attachmentName = `metric:${metricName}`;
	const payload = JSON.stringify({ value, unit });
	await testInfo.attach(attachmentName, { body: payload });
}

3
pnpm-lock.yaml generated
View File

@@ -3183,6 +3183,9 @@ importers:
tsx:
specifier: 'catalog:'
version: 4.19.3
zod:
specifier: 'catalog:'
version: 3.25.67
packages/workflow:
dependencies: