mirror of
https://github.com/Abdulazizzn/n8n-enterprise-unlocked.git
synced 2025-12-17 10:02:05 +00:00
fix(core): Fix metric default value handling and add AI model connection validation for setMetric operation in Evaluation (#18088)
This commit adds one new file:

packages/workflow/src/evaluation-helpers.ts — new file, 25 lines added

@@ -0,0 +1,25 @@
/**
 * Evaluation-related utility functions
 *
 * This file contains utilities that need to be shared between different packages
 * to avoid circular dependencies. For example, the evaluation test-runner (in CLI package)
 * and the Evaluation node (in nodes-base package) both need to know which metrics
 * require AI model connections, but they can't import from each other directly.
 *
 * By placing shared utilities here in the workflow package (which both packages depend on),
 * we avoid circular dependency issues.
 */
/**
|
||||
* Default metric type used in evaluations
|
||||
*/
|
||||
export const DEFAULT_EVALUATION_METRIC = 'correctness';
|
||||
|
||||
/**
|
||||
* Determines if a given evaluation metric requires an AI model connection
|
||||
* @param metric The metric name to check
|
||||
* @returns true if the metric requires an AI model connection
|
||||
*/
|
||||
export function metricRequiresModelConnection(metric: string): boolean {
|
||||
return ['correctness', 'helpfulness'].includes(metric);
|
||||
}
|
||||
@@ -67,6 +67,7 @@ export { ExpressionExtensions } from './extensions';
 export * as ExpressionParser from './extensions/expression-parser';
 export { NativeMethods } from './native-methods';
 export * from './node-parameters/filter-parameter';
+export * from './evaluation-helpers';

 export type {
 	DocMetadata,
Reference in New Issue
Block a user