Mirror of https://github.com/Abdulazizzn/n8n-enterprise-unlocked.git (synced 2025-12-17 01:56:46 +00:00)
Based on #7065 | Story: https://linear.app/n8n/issue/PAY-771

In filesystem mode, n8n marks binary data for deletion on manual execution deletion, on unsaved execution completion, and on every execution pruning cycle. We later prune binary data in a separate cycle via these marker files, based on the configured TTL. In the context of introducing an S3 client to manage binary data, the filesystem mode's mark-and-prune setup is too tightly coupled to the general binary data management client interface.

This PR:

- Ensures that deleting an execution also deletes any binary data associated with it. This removes the need for a binary data TTL and simplifies the filesystem mode's mark-and-prune setup.
- Refactors all execution deletions (including pruning) into soft deletions, hard-deletes soft-deleted executions based on the existing pruning config, and adjusts execution endpoints to filter out soft-deleted executions (see the sketch below). This reduces DB load and keeps binary data around long enough for users to access it when building workflows with unsaved executions.
- Moves all execution pruning work from an execution lifecycle hook to `execution.repository.ts`, keeping related logic in a single place.
- Removes all marking logic from the binary data manager. This simplifies the interface that the S3 client will need to implement.
- Adds basic sanity-check tests for pruning logic and execution deletion.

Out of scope:

- Improving existing pruning logic.
- Improving existing execution repository logic.
- Adjusting the directory structure for filesystem mode.

---------

Co-authored-by: कारतोफ्फेलस्क्रिप्ट™ <aditya@netroy.in>
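For context on the soft-delete-then-prune approach described above, here is a minimal TypeORM-style sketch. It is an illustration only, not code from this PR: `ExecutionEntity`, `softDeleteExecution`, `hardDeleteSoftDeleted`, and the commented-out `deleteBinaryDataFor` call are hypothetical names; only the general pattern of a `@DeleteDateColumn`, `softDelete()`, and a `withDeleted` find is meant to carry over.

```ts
import {
	Column,
	DeleteDateColumn,
	Entity,
	LessThan,
	PrimaryGeneratedColumn,
	Repository,
} from 'typeorm';

@Entity()
class ExecutionEntity {
	@PrimaryGeneratedColumn()
	id!: number;

	@Column()
	stoppedAt!: Date;

	// Set by softDelete(); regular find() calls skip rows where this is non-null.
	@DeleteDateColumn()
	deletedAt?: Date;
}

// "Deleting" an execution only marks it, so its binary data stays reachable
// until the hard-deletion pass below runs.
async function softDeleteExecution(repo: Repository<ExecutionEntity>, executionId: number) {
	await repo.softDelete(executionId);
}

// Periodic pruning: permanently remove executions soft-deleted before the
// cutoff derived from the pruning config, then clean up their binary data.
async function hardDeleteSoftDeleted(repo: Repository<ExecutionEntity>, cutoff: Date) {
	const stale = await repo.find({
		select: ['id'],
		where: { deletedAt: LessThan(cutoff) },
		withDeleted: true, // include soft-deleted rows, which find() hides by default
	});

	if (stale.length === 0) return;

	// await deleteBinaryDataFor(stale.map((e) => e.id)); // hypothetical binary data cleanup

	await repo.delete(stale.map((e) => e.id));
}
```

The effect is that endpoints using plain `find()` calls automatically exclude soft-deleted executions, while the pruning pass can still reach them via `withDeleted`.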
100 lines
3.3 KiB
TypeScript
import type { ExecutionStatus, IRun, IWorkflowBase } from 'n8n-workflow';
import type { ExecutionPayload, IExecutionDb } from '@/Interfaces';
import pick from 'lodash/pick';
import { isWorkflowIdValid } from '@/utils';
import { LoggerProxy } from 'n8n-workflow';
import Container from 'typedi';
import { ExecutionRepository } from '../../databases/repositories';
import { ExecutionMetadataService } from '../../services/executionMetadata.service';

/** Determine the final status of an execution from its run data. */
export function determineFinalExecutionStatus(runData: IRun): ExecutionStatus {
	const workflowHasCrashed = runData.status === 'crashed';
	const workflowWasCanceled = runData.status === 'canceled';
	const workflowDidSucceed =
		!runData.data.resultData.error && !workflowHasCrashed && !workflowWasCanceled;
	let workflowStatusFinal: ExecutionStatus = workflowDidSucceed ? 'success' : 'failed';
	if (workflowHasCrashed) workflowStatusFinal = 'crashed';
	if (workflowWasCanceled) workflowStatusFinal = 'canceled';
	if (runData.waitTill) workflowStatusFinal = 'waiting';
	return workflowStatusFinal;
}

/** Assemble the execution payload to be written to the database. */
export function prepareExecutionDataForDbUpdate(parameters: {
	runData: IRun;
	workflowData: IWorkflowBase;
	workflowStatusFinal: ExecutionStatus;
	retryOf?: string;
}) {
	const { runData, workflowData, workflowStatusFinal, retryOf } = parameters;

	// Although it is treated as IWorkflowBase here, it's being instantiated elsewhere with properties that may be sensitive.
	// As a result, we should create an IWorkflowBase object with only the data we want to save in it.
	const pristineWorkflowData: IWorkflowBase = pick(workflowData, [
		'id',
		'name',
		'active',
		'createdAt',
		'updatedAt',
		'nodes',
		'connections',
		'settings',
		'staticData',
		'pinData',
	]);

	const fullExecutionData: ExecutionPayload = {
		data: runData.data,
		mode: runData.mode,
		finished: runData.finished ? runData.finished : false,
		startedAt: runData.startedAt,
		stoppedAt: runData.stoppedAt,
		workflowData: pristineWorkflowData,
		waitTill: runData.waitTill,
		status: workflowStatusFinal,
	};

	if (retryOf !== undefined) {
		fullExecutionData.retryOf = retryOf.toString();
	}

	const workflowId = workflowData.id;
	if (isWorkflowIdValid(workflowId)) {
		fullExecutionData.workflowId = workflowId;
	}

	return fullExecutionData;
}

/**
 * Persist execution data, save any custom metadata, and, for a finished retry,
 * record its ID on the original execution via `retrySuccessId`.
 */
export async function updateExistingExecution(parameters: {
	executionId: string;
	workflowId: string;
	executionData: Partial<IExecutionDb>;
}) {
	const { executionId, workflowId, executionData } = parameters;

	// Log before the flatten operation, as it increases memory usage considerably and the chance of a crash is highest here.
	LoggerProxy.debug(`Save execution data to database for execution ID ${executionId}`, {
		executionId,
		workflowId,
		finished: executionData.finished,
		stoppedAt: executionData.stoppedAt,
	});

	await Container.get(ExecutionRepository).updateExistingExecution(executionId, executionData);

	try {
		if (executionData.data?.resultData.metadata) {
			await Container.get(ExecutionMetadataService).save(
				executionId,
				executionData.data.resultData.metadata,
			);
		}
	} catch (e) {
		LoggerProxy.error(`Failed to save metadata for execution ID ${executionId}`, e as Error);
	}

	if (executionData.finished === true && executionData.retryOf !== undefined) {
		await Container.get(ExecutionRepository).updateExistingExecution(executionData.retryOf, {
			retrySuccessId: executionId,
		});
	}
}
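To show how these three helpers fit together, a minimal usage sketch follows. The surrounding function `persistFinishedRun` and its parameters are assumptions for illustration only; the actual caller is not part of this file (presumably an execution lifecycle hook).

```ts
// Hypothetical caller wiring the helpers above together after a run finishes.
async function persistFinishedRun(options: {
	fullRunData: IRun;
	workflowData: IWorkflowBase;
	executionId: string;
	workflowId: string;
	retryOf?: string;
}) {
	const { fullRunData, workflowData, executionId, workflowId, retryOf } = options;

	// 1. Derive the final status ('success', 'failed', 'crashed', 'canceled', or 'waiting').
	const workflowStatusFinal = determineFinalExecutionStatus(fullRunData);

	// 2. Build the payload, keeping only non-sensitive workflow properties.
	const fullExecutionData = prepareExecutionDataForDbUpdate({
		runData: fullRunData,
		workflowData,
		workflowStatusFinal,
		retryOf,
	});

	// 3. Write the execution, its metadata, and any retry link to the database.
	await updateExistingExecution({ executionId, workflowId, executionData: fullExecutionData });
}
```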