fix(core): Fix metric default value handling and add AI model connection validation for setMetric operation in Evaluation (#18088)

Author: jeanpaul
Date: 2025-08-08 12:38:58 +02:00
Committed by: GitHub
Parent: 9a8417d27b
Commit: 03c75c365b
7 changed files with 307 additions and 40 deletions

New file: evaluation-helpers.ts (workflow package)

@@ -0,0 +1,25 @@
+/**
+ * Evaluation-related utility functions
+ *
+ * This file contains utilities that need to be shared between different packages
+ * to avoid circular dependencies. For example, the evaluation test-runner (in CLI package)
+ * and the Evaluation node (in nodes-base package) both need to know which metrics
+ * require AI model connections, but they can't import from each other directly.
+ *
+ * By placing shared utilities here in the workflow package (which both packages depend on),
+ * we avoid circular dependency issues.
+ */
+
+/**
+ * Default metric type used in evaluations
+ */
+export const DEFAULT_EVALUATION_METRIC = 'correctness';
+
+/**
+ * Determines if a given evaluation metric requires an AI model connection
+ * @param metric The metric name to check
+ * @returns true if the metric requires an AI model connection
+ */
+export function metricRequiresModelConnection(metric: string): boolean {
+	return ['correctness', 'helpfulness'].includes(metric);
+}
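
For context, a minimal sketch of how a consumer such as the Evaluation node might use these helpers. The validateMetricConfig wrapper and its parameters are hypothetical and not part of this commit, and the import assumes the workflow package's published name, n8n-workflow:

import { DEFAULT_EVALUATION_METRIC, metricRequiresModelConnection } from 'n8n-workflow';

// Hypothetical guard: fall back to the default metric when none is configured,
// then verify that an AI model connection exists for model-backed metrics.
function validateMetricConfig(metric: string | undefined, hasModelConnection: boolean): void {
	const effectiveMetric = metric ?? DEFAULT_EVALUATION_METRIC;
	if (metricRequiresModelConnection(effectiveMetric) && !hasModelConnection) {
		throw new Error(`The "${effectiveMetric}" metric requires an AI model connection`);
	}
}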

Modified file: index.ts (workflow package exports)

@@ -67,6 +67,7 @@ export { ExpressionExtensions } from './extensions';
 export * as ExpressionParser from './extensions/expression-parser';
 export { NativeMethods } from './native-methods';
 export * from './node-parameters/filter-parameter';
+export * from './evaluation-helpers';
 export type {
 	DocMetadata,
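
With this re-export in place, both the evaluation test-runner (CLI package) and the Evaluation node (nodes-base package) can pull the helpers from the workflow package's entry point instead of importing from each other. A brief sketch, again assuming the published package name; the second metric name is an arbitrary example:

import { metricRequiresModelConnection } from 'n8n-workflow';

metricRequiresModelConnection('correctness'); // true: requires an AI model connection
metricRequiresModelConnection('exactMatch');  // false: not in the model-backed list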