feat(Google Gemini Node): New node (#16863)

RomanDavydchuk
2025-07-09 13:29:00 +03:00
committed by GitHub
parent 9afa6d452a
commit 0f59eeaf5b
34 changed files with 3864 additions and 0 deletions

File diff suppressed because it is too large

@@ -0,0 +1,17 @@
import type { IExecuteFunctions, INodeType } from 'n8n-workflow';
import { router } from './actions/router';
import { versionDescription } from './actions/versionDescription';
import { listSearch } from './methods';
export class GoogleGemini implements INodeType {
description = versionDescription;
methods = {
listSearch,
};
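// Execution is delegated to the router, which dispatches to the handler for the selected resource and operation.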
async execute(this: IExecuteFunctions) {
return await router.call(this);
}
}


@@ -0,0 +1,102 @@
import type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { baseAnalyze } from '../../helpers/baseAnalyze';
import { modelRLC } from '../descriptions';
const properties: INodeProperties[] = [
modelRLC('audioModelSearch'),
{
displayName: 'Text Input',
name: 'text',
type: 'string',
placeholder: "e.g. What's in this audio?",
default: "What's in this audio?",
typeOptions: {
rows: 2,
},
},
{
displayName: 'Input Type',
name: 'inputType',
type: 'options',
default: 'url',
options: [
{
name: 'Audio URL(s)',
value: 'url',
},
{
name: 'Binary File(s)',
value: 'binary',
},
],
},
{
displayName: 'URL(s)',
name: 'audioUrls',
type: 'string',
placeholder: 'e.g. https://example.com/audio.mp3',
description: 'URL(s) of the audio(s) to analyze; add multiple URLs separated by commas',
default: '',
displayOptions: {
show: {
inputType: ['url'],
},
},
},
{
displayName: 'Input Data Field Name(s)',
name: 'binaryPropertyName',
type: 'string',
default: 'data',
placeholder: 'e.g. data',
hint: 'The name of the input field containing the binary file data to be processed',
description:
'Name of the binary field(s) which contain the audio(s); separate multiple field names with commas',
displayOptions: {
show: {
inputType: ['binary'],
},
},
},
{
displayName: 'Simplify Output',
name: 'simplify',
type: 'boolean',
default: true,
description: 'Whether to simplify the response or not',
},
{
displayName: 'Options',
name: 'options',
placeholder: 'Add Option',
type: 'collection',
default: {},
options: [
{
displayName: 'Length of Description (Max Tokens)',
description: 'Fewer tokens will result in a shorter, less detailed audio description',
name: 'maxOutputTokens',
type: 'number',
default: 300,
typeOptions: {
minValue: 1,
},
},
],
},
];
const displayOptions = {
show: {
operation: ['analyze'],
resource: ['audio'],
},
};
export const description = updateDisplayOptions(displayOptions, properties);
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
return await baseAnalyze.call(this, i, 'audioUrls', 'audio/mpeg');
}


@@ -0,0 +1,37 @@
import type { INodeProperties } from 'n8n-workflow';
import * as analyze from './analyze.operation';
import * as transcribe from './transcribe.operation';
export { analyze, transcribe };
export const description: INodeProperties[] = [
{
displayName: 'Operation',
name: 'operation',
type: 'options',
noDataExpression: true,
options: [
{
name: 'Analyze Audio',
value: 'analyze',
action: 'Analyze audio',
description: 'Take in audio and answer questions about it',
},
{
name: 'Transcribe a Recording',
value: 'transcribe',
action: 'Transcribe a recording',
description: 'Transcribes audio into text',
},
],
default: 'transcribe',
displayOptions: {
show: {
resource: ['audio'],
},
},
},
...analyze.description,
...transcribe.description,
];


@@ -0,0 +1,181 @@
import type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import type { Content, GenerateContentResponse } from '../../helpers/interfaces';
import { downloadFile, uploadFile } from '../../helpers/utils';
import { apiRequest } from '../../transport';
import { modelRLC } from '../descriptions';
const properties: INodeProperties[] = [
modelRLC('audioModelSearch'),
{
displayName: 'Input Type',
name: 'inputType',
type: 'options',
default: 'url',
options: [
{
name: 'Audio URL(s)',
value: 'url',
},
{
name: 'Binary File(s)',
value: 'binary',
},
],
},
{
displayName: 'URL(s)',
name: 'audioUrls',
type: 'string',
placeholder: 'e.g. https://example.com/audio.mp3',
description:
'URL(s) of the audio(s) to transcribe; add multiple URLs separated by commas',
default: '',
displayOptions: {
show: {
inputType: ['url'],
},
},
},
{
displayName: 'Input Data Field Name(s)',
name: 'binaryPropertyName',
type: 'string',
default: 'data',
placeholder: 'e.g. data',
hint: 'The name of the input field containing the binary file data to be processed',
description:
'Name of the binary field(s) which contain the audio(s); separate multiple field names with commas',
displayOptions: {
show: {
inputType: ['binary'],
},
},
},
{
displayName: 'Simplify Output',
name: 'simplify',
type: 'boolean',
default: true,
description: 'Whether to simplify the response or not',
},
{
displayName: 'Options',
name: 'options',
type: 'collection',
default: {},
options: [
{
displayName: 'Start Time',
name: 'startTime',
type: 'string',
default: '',
description: 'The start time of the audio in MM:SS or HH:MM:SS format',
placeholder: 'e.g. 00:15',
},
{
displayName: 'End Time',
name: 'endTime',
type: 'string',
default: '',
description: 'The end time of the audio in MM:SS or HH:MM:SS format',
placeholder: 'e.g. 02:15',
},
],
},
];
const displayOptions = {
show: {
operation: ['transcribe'],
resource: ['audio'],
},
};
export const description = updateDisplayOptions(displayOptions, properties);
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
const model = this.getNodeParameter('modelId', i, '', { extractValue: true }) as string;
const inputType = this.getNodeParameter('inputType', i, 'url') as string;
const simplify = this.getNodeParameter('simplify', i, true) as boolean;
const options = this.getNodeParameter('options', i, {});
let contents: Content[];
if (inputType === 'url') {
const urls = this.getNodeParameter('audioUrls', i, '') as string;
const filesDataPromises = urls
.split(',')
.map((url) => url.trim())
.filter((url) => url)
.map(async (url) => {
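// Files already hosted on the Gemini Files API are referenced by URI (only their metadata is fetched);
// any other URL is downloaded first and re-uploaded through the Files API.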
if (url.startsWith('https://generativelanguage.googleapis.com')) {
const { mimeType } = (await apiRequest.call(this, 'GET', '', {
option: { url },
})) as { mimeType: string };
return { fileUri: url, mimeType };
} else {
const { fileContent, mimeType } = await downloadFile.call(this, url, 'audio/mpeg');
return await uploadFile.call(this, fileContent, mimeType);
}
});
const filesData = await Promise.all(filesDataPromises);
contents = [
{
role: 'user',
parts: filesData.map((fileData) => ({
fileData,
})),
},
];
} else {
const binaryPropertyNames = this.getNodeParameter('binaryPropertyName', i, 'data');
const promises = binaryPropertyNames
.split(',')
.map((binaryPropertyName) => binaryPropertyName.trim())
.filter((binaryPropertyName) => binaryPropertyName)
.map(async (binaryPropertyName) => {
const binaryData = this.helpers.assertBinaryData(i, binaryPropertyName);
const buffer = await this.helpers.getBinaryDataBuffer(i, binaryPropertyName);
return await uploadFile.call(this, buffer, binaryData.mimeType);
});
const filesData = await Promise.all(promises);
contents = [
{
role: 'user',
parts: filesData.map((fileData) => ({
fileData,
})),
},
];
}
const text = `Generate a transcript of the speech${
options.startTime ? ` from ${options.startTime as string}` : ''
}${options.endTime ? ` to ${options.endTime as string}` : ''}`;
contents[0].parts.push({ text });
const body = {
contents,
};
const response = (await apiRequest.call(this, 'POST', `/v1beta/${model}:generateContent`, {
body,
})) as GenerateContentResponse;
if (simplify) {
return response.candidates.map((candidate) => ({
json: candidate,
pairedItem: { item: i },
}));
}
return [
{
json: { ...response },
pairedItem: { item: i },
},
];
}


@@ -0,0 +1,26 @@
import type { INodeProperties } from 'n8n-workflow';
export const modelRLC = (searchListMethod: string): INodeProperties => ({
displayName: 'Model',
name: 'modelId',
type: 'resourceLocator',
default: { mode: 'list', value: '' },
required: true,
modes: [
{
displayName: 'From List',
name: 'list',
type: 'list',
typeOptions: {
searchListMethod,
searchable: true,
},
},
{
displayName: 'ID',
name: 'id',
type: 'string',
placeholder: 'e.g. models/gemini-2.5-flash',
},
],
});


@@ -0,0 +1,103 @@
import type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { baseAnalyze } from '../../helpers/baseAnalyze';
import { modelRLC } from '../descriptions';
const properties: INodeProperties[] = [
modelRLC('modelSearch'),
{
displayName: 'Text Input',
name: 'text',
type: 'string',
placeholder: "e.g. What's in this document?",
default: "What's in this document?",
typeOptions: {
rows: 2,
},
},
{
displayName: 'Input Type',
name: 'inputType',
type: 'options',
default: 'url',
options: [
{
name: 'Document URL(s)',
value: 'url',
},
{
name: 'Binary File(s)',
value: 'binary',
},
],
},
{
displayName: 'URL(s)',
name: 'documentUrls',
type: 'string',
placeholder: 'e.g. https://example.com/document.pdf',
description:
'URL(s) of the document(s) to analyze; add multiple URLs separated by commas',
default: '',
displayOptions: {
show: {
inputType: ['url'],
},
},
},
{
displayName: 'Input Data Field Name(s)',
name: 'binaryPropertyName',
type: 'string',
default: 'data',
placeholder: 'e.g. data',
hint: 'The name of the input field containing the binary file data to be processed',
description:
'Name of the binary field(s) which contain the document(s); separate multiple field names with commas',
displayOptions: {
show: {
inputType: ['binary'],
},
},
},
{
displayName: 'Simplify Output',
name: 'simplify',
type: 'boolean',
default: true,
description: 'Whether to simplify the response or not',
},
{
displayName: 'Options',
name: 'options',
placeholder: 'Add Option',
type: 'collection',
default: {},
options: [
{
displayName: 'Length of Description (Max Tokens)',
description: 'Fewer tokens will result in a shorter, less detailed document description',
name: 'maxOutputTokens',
type: 'number',
default: 300,
typeOptions: {
minValue: 1,
},
},
],
},
];
const displayOptions = {
show: {
operation: ['analyze'],
resource: ['document'],
},
};
export const description = updateDisplayOptions(displayOptions, properties);
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
return await baseAnalyze.call(this, i, 'documentUrls', 'application/pdf');
}


@@ -0,0 +1,29 @@
import type { INodeProperties } from 'n8n-workflow';
import * as analyze from './analyze.operation';
export { analyze };
export const description: INodeProperties[] = [
{
displayName: 'Operation',
name: 'operation',
type: 'options',
noDataExpression: true,
options: [
{
name: 'Analyze Document',
value: 'analyze',
action: 'Analyze document',
description: 'Take in documents and answer questions about them',
},
],
default: 'analyze',
displayOptions: {
show: {
resource: ['document'],
},
},
},
...analyze.description,
];


@@ -0,0 +1,29 @@
import type { INodeProperties } from 'n8n-workflow';
import * as upload from './upload.operation';
export { upload };
export const description: INodeProperties[] = [
{
displayName: 'Operation',
name: 'operation',
type: 'options',
noDataExpression: true,
options: [
{
name: 'Upload File',
value: 'upload',
action: 'Upload a file',
description: 'Upload a file to the Google Gemini API for later use',
},
],
default: 'upload',
displayOptions: {
show: {
resource: ['file'],
},
},
},
...upload.description,
];


@@ -0,0 +1,93 @@
import type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { downloadFile, uploadFile } from '../../helpers/utils';
export const properties: INodeProperties[] = [
{
displayName: 'Input Type',
name: 'inputType',
type: 'options',
default: 'url',
options: [
{
name: 'File URL',
value: 'url',
},
{
name: 'Binary File',
value: 'binary',
},
],
},
{
displayName: 'URL',
name: 'fileUrl',
type: 'string',
placeholder: 'e.g. https://example.com/file.pdf',
description: 'URL of the file to upload',
default: '',
displayOptions: {
show: {
inputType: ['url'],
},
},
},
{
displayName: 'Input Data Field Name',
name: 'binaryPropertyName',
type: 'string',
default: 'data',
placeholder: 'e.g. data',
hint: 'The name of the input field containing the binary file data to be processed',
description: 'Name of the binary property which contains the file',
displayOptions: {
show: {
inputType: ['binary'],
},
},
},
];
const displayOptions = {
show: {
operation: ['upload'],
resource: ['file'],
},
};
export const description = updateDisplayOptions(displayOptions, properties);
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
const inputType = this.getNodeParameter('inputType', i, 'url') as string;
if (inputType === 'url') {
const fileUrl = this.getNodeParameter('fileUrl', i, '') as string;
const { fileContent, mimeType } = await downloadFile.call(
this,
fileUrl,
'application/octet-stream',
);
const response = await uploadFile.call(this, fileContent, mimeType);
return [
{
json: response,
pairedItem: {
item: i,
},
},
];
} else {
const binaryPropertyName = this.getNodeParameter('binaryPropertyName', i, 'data');
const binaryData = this.helpers.assertBinaryData(i, binaryPropertyName);
const buffer = await this.helpers.getBinaryDataBuffer(i, binaryPropertyName);
const response = await uploadFile.call(this, buffer, binaryData.mimeType);
return [
{
json: response,
pairedItem: {
item: i,
},
},
];
}
}


@@ -0,0 +1,102 @@
import type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { baseAnalyze } from '../../helpers/baseAnalyze';
import { modelRLC } from '../descriptions';
const properties: INodeProperties[] = [
modelRLC('modelSearch'),
{
displayName: 'Text Input',
name: 'text',
type: 'string',
placeholder: "e.g. What's in this image?",
default: "What's in this image?",
typeOptions: {
rows: 2,
},
},
{
displayName: 'Input Type',
name: 'inputType',
type: 'options',
default: 'url',
options: [
{
name: 'Image URL(s)',
value: 'url',
},
{
name: 'Binary File(s)',
value: 'binary',
},
],
},
{
displayName: 'URL(s)',
name: 'imageUrls',
type: 'string',
placeholder: 'e.g. https://example.com/image.png',
description: 'URL(s) of the image(s) to analyze; add multiple URLs separated by commas',
default: '',
displayOptions: {
show: {
inputType: ['url'],
},
},
},
{
displayName: 'Input Data Field Name(s)',
name: 'binaryPropertyName',
type: 'string',
default: 'data',
placeholder: 'e.g. data',
hint: 'The name of the input field containing the binary file data to be processed',
description:
'Name of the binary field(s) which contain the image(s); separate multiple field names with commas',
displayOptions: {
show: {
inputType: ['binary'],
},
},
},
{
displayName: 'Simplify Output',
name: 'simplify',
type: 'boolean',
default: true,
description: 'Whether to simplify the response or not',
},
{
displayName: 'Options',
name: 'options',
placeholder: 'Add Option',
type: 'collection',
default: {},
options: [
{
displayName: 'Length of Description (Max Tokens)',
description: 'Fewer tokens will result in a shorter, less detailed image description',
name: 'maxOutputTokens',
type: 'number',
default: 300,
typeOptions: {
minValue: 1,
},
},
],
},
];
const displayOptions = {
show: {
operation: ['analyze'],
resource: ['image'],
},
};
export const description = updateDisplayOptions(displayOptions, properties);
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
return await baseAnalyze.call(this, i, 'imageUrls', 'image/png');
}


@@ -0,0 +1,152 @@
import type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';
import { NodeOperationError, updateDisplayOptions } from 'n8n-workflow';
import type { GenerateContentResponse, ImagenResponse } from '../../helpers/interfaces';
import { apiRequest } from '../../transport';
import { modelRLC } from '../descriptions';
const properties: INodeProperties[] = [
modelRLC('imageGenerationModelSearch'),
{
displayName: 'Prompt',
name: 'prompt',
type: 'string',
placeholder: 'e.g. A cute cat eating a dinosaur',
description: 'A text description of the desired image(s)',
default: '',
typeOptions: {
rows: 2,
},
},
{
displayName: 'Options',
name: 'options',
placeholder: 'Add Option',
type: 'collection',
default: {},
options: [
{
displayName: 'Number of Images',
name: 'sampleCount',
default: 1,
description:
'Number of images to generate. Supported by Imagen models but not by Gemini models.',
type: 'number',
typeOptions: {
minValue: 1,
},
},
{
displayName: 'Put Output in Field',
name: 'binaryPropertyOutput',
type: 'string',
default: 'data',
hint: 'The name of the output field to put the binary file data in',
},
],
},
];
const displayOptions = {
show: {
operation: ['generate'],
resource: ['image'],
},
};
export const description = updateDisplayOptions(displayOptions, properties);
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
const model = this.getNodeParameter('modelId', i, '', { extractValue: true }) as string;
const prompt = this.getNodeParameter('prompt', i, '') as string;
const binaryPropertyOutput = this.getNodeParameter(
'options.binaryPropertyOutput',
i,
'data',
) as string;
if (model.includes('gemini')) {
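// Gemini image-capable models return the generated image as inline base64 data from generateContent,
// so the request asks for both IMAGE and TEXT response modalities.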
const generationConfig = {
responseModalities: ['IMAGE', 'TEXT'],
};
const body = {
contents: [
{
role: 'user',
parts: [{ text: prompt }],
},
],
generationConfig,
};
const response = (await apiRequest.call(this, 'POST', `/v1beta/${model}:generateContent`, {
body,
})) as GenerateContentResponse;
const promises = response.candidates.map(async (candidate) => {
const imagePart = candidate.content.parts.find((part) => 'inlineData' in part);
const buffer = Buffer.from(imagePart?.inlineData.data ?? '', 'base64');
const binaryData = await this.helpers.prepareBinaryData(
buffer,
'image.png',
imagePart?.inlineData.mimeType,
);
return {
binary: {
[binaryPropertyOutput]: binaryData,
},
json: {
...binaryData,
data: undefined,
},
pairedItem: { item: i },
};
});
return await Promise.all(promises);
} else if (model.includes('imagen')) {
// Imagen models use a different endpoint and request/response structure
const sampleCount = this.getNodeParameter('options.sampleCount', i, 1) as number;
const body = {
instances: [
{
prompt,
},
],
parameters: {
sampleCount,
},
};
const response = (await apiRequest.call(this, 'POST', `/v1beta/${model}:predict`, {
body,
})) as ImagenResponse;
const promises = response.predictions.map(async (prediction) => {
const buffer = Buffer.from(prediction.bytesBase64Encoded ?? '', 'base64');
const binaryData = await this.helpers.prepareBinaryData(
buffer,
'image.png',
prediction.mimeType,
);
return {
binary: {
[binaryPropertyOutput]: binaryData,
},
json: {
...binaryData,
data: undefined,
},
pairedItem: { item: i },
};
});
return await Promise.all(promises);
}
throw new NodeOperationError(
this.getNode(),
`Model ${model} is not supported for image generation`,
{
description: 'Please check the model ID and try again.',
},
);
}


@@ -0,0 +1,37 @@
import type { INodeProperties } from 'n8n-workflow';
import * as analyze from './analyze.operation';
import * as generate from './generate.operation';
export { analyze, generate };
export const description: INodeProperties[] = [
{
displayName: 'Operation',
name: 'operation',
type: 'options',
noDataExpression: true,
options: [
{
name: 'Analyze Image',
value: 'analyze',
action: 'Analyze image',
description: 'Take in images and answer questions about them',
},
{
name: 'Generate an Image',
value: 'generate',
action: 'Generate an image',
description: 'Creates an image from a text prompt',
},
],
default: 'generate',
displayOptions: {
show: {
resource: ['image'],
},
},
},
...analyze.description,
...generate.description,
];


@@ -0,0 +1,12 @@
import type { AllEntities } from 'n8n-workflow';
type NodeMap = {
text: 'message';
image: 'analyze' | 'generate';
video: 'analyze' | 'generate' | 'download';
audio: 'transcribe' | 'analyze';
document: 'analyze';
file: 'upload';
};
export type GoogleGeminiType = AllEntities<NodeMap>;


@@ -0,0 +1,127 @@
import { mockDeep } from 'jest-mock-extended';
import type { IExecuteFunctions } from 'n8n-workflow';
import * as audio from './audio';
import * as document from './document';
import * as file from './file';
import * as image from './image';
import { router } from './router';
import * as text from './text';
import * as video from './video';
describe('Google Gemini router', () => {
const mockExecuteFunctions = mockDeep<IExecuteFunctions>();
const mockAudio = jest.spyOn(audio.analyze, 'execute');
const mockDocument = jest.spyOn(document.analyze, 'execute');
const mockFile = jest.spyOn(file.upload, 'execute');
const mockImage = jest.spyOn(image.analyze, 'execute');
const mockText = jest.spyOn(text.message, 'execute');
const mockVideo = jest.spyOn(video.analyze, 'execute');
const operationMocks = [
[mockAudio, 'audio', 'analyze'],
[mockDocument, 'document', 'analyze'],
[mockFile, 'file', 'upload'],
[mockImage, 'image', 'analyze'],
[mockText, 'text', 'message'],
[mockVideo, 'video', 'analyze'],
];
beforeEach(() => {
jest.clearAllMocks();
});
it.each(operationMocks)('should call the correct method', async (mock, resource, operation) => {
mockExecuteFunctions.getNodeParameter.mockImplementation((parameter) =>
parameter === 'resource' ? resource : operation,
);
mockExecuteFunctions.getInputData.mockReturnValue([
{
json: {},
},
]);
(mock as jest.Mock).mockResolvedValue([
{
json: {
foo: 'bar',
},
},
]);
const result = await router.call(mockExecuteFunctions);
expect(mock).toHaveBeenCalledWith(0);
expect(result).toEqual([[{ json: { foo: 'bar' } }]]);
});
it('should return an error if the operation is not supported', async () => {
mockExecuteFunctions.getNodeParameter.mockImplementation((parameter) =>
parameter === 'resource' ? 'foo' : 'bar',
);
mockExecuteFunctions.getInputData.mockReturnValue([{ json: {} }]);
await expect(router.call(mockExecuteFunctions)).rejects.toThrow(
'The operation "bar" is not supported!',
);
});
it('should loop over all items', async () => {
mockExecuteFunctions.getNodeParameter.mockImplementation((parameter) =>
parameter === 'resource' ? 'audio' : 'analyze',
);
mockExecuteFunctions.getInputData.mockReturnValue([
{
json: {
text: 'item 1',
},
},
{
json: {
text: 'item 2',
},
},
{
json: {
text: 'item 3',
},
},
]);
mockAudio.mockResolvedValueOnce([{ json: { response: 'foo' } }]);
mockAudio.mockResolvedValueOnce([{ json: { response: 'bar' } }]);
mockAudio.mockResolvedValueOnce([{ json: { response: 'baz' } }]);
const result = await router.call(mockExecuteFunctions);
expect(result).toEqual([
[{ json: { response: 'foo' } }, { json: { response: 'bar' } }, { json: { response: 'baz' } }],
]);
});
it('should continue on fail', async () => {
mockExecuteFunctions.continueOnFail.mockReturnValue(true);
mockExecuteFunctions.getNodeParameter.mockImplementation((parameter) =>
parameter === 'resource' ? 'audio' : 'analyze',
);
mockExecuteFunctions.getInputData.mockReturnValue([{ json: {} }, { json: {} }]);
mockAudio.mockRejectedValue(new Error('Some error'));
const result = await router.call(mockExecuteFunctions);
expect(result).toEqual([
[
{ json: { error: 'Some error' }, pairedItem: { item: 0 } },
{ json: { error: 'Some error' }, pairedItem: { item: 1 } },
],
]);
});
it('should throw an error if continueOnFail is false', async () => {
mockExecuteFunctions.continueOnFail.mockReturnValue(false);
mockExecuteFunctions.getNodeParameter.mockImplementation((parameter) =>
parameter === 'resource' ? 'audio' : 'analyze',
);
mockExecuteFunctions.getInputData.mockReturnValue([{ json: {} }]);
mockAudio.mockRejectedValue(new Error('Some error'));
await expect(router.call(mockExecuteFunctions)).rejects.toThrow('Some error');
});
});


@@ -0,0 +1,68 @@
import { NodeOperationError, type IExecuteFunctions, type INodeExecutionData } from 'n8n-workflow';
import * as audio from './audio';
import * as document from './document';
import * as file from './file';
import * as image from './image';
import type { GoogleGeminiType } from './node.type';
import * as text from './text';
import * as video from './video';
export async function router(this: IExecuteFunctions) {
const returnData: INodeExecutionData[] = [];
const items = this.getInputData();
const resource = this.getNodeParameter('resource', 0);
const operation = this.getNodeParameter('operation', 0);
const googleGeminiTypeData = {
resource,
operation,
} as GoogleGeminiType;
let execute;
switch (googleGeminiTypeData.resource) {
case 'audio':
execute = audio[googleGeminiTypeData.operation].execute;
break;
case 'document':
execute = document[googleGeminiTypeData.operation].execute;
break;
case 'file':
execute = file[googleGeminiTypeData.operation].execute;
break;
case 'image':
execute = image[googleGeminiTypeData.operation].execute;
break;
case 'text':
execute = text[googleGeminiTypeData.operation].execute;
break;
case 'video':
execute = video[googleGeminiTypeData.operation].execute;
break;
default:
throw new NodeOperationError(
this.getNode(),
`The operation "${operation}" is not supported!`,
);
}
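// Run the selected operation once per input item; with 'Continue On Fail' enabled, errors are
// returned as result items instead of aborting the whole execution.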
for (let i = 0; i < items.length; i++) {
try {
const responseData = await execute.call(this, i);
returnData.push(...responseData);
} catch (error) {
if (this.continueOnFail()) {
returnData.push({ json: { error: error.message }, pairedItem: { item: i } });
continue;
}
throw new NodeOperationError(this.getNode(), error, {
itemIndex: i,
description: error.description,
});
}
}
return [returnData];
}


@@ -0,0 +1,29 @@
import type { INodeProperties } from 'n8n-workflow';
import * as message from './message.operation';
export { message };
export const description: INodeProperties[] = [
{
displayName: 'Operation',
name: 'operation',
type: 'options',
noDataExpression: true,
options: [
{
name: 'Message a Model',
value: 'message',
action: 'Message a model',
description: 'Create a completion with a Google Gemini model',
},
],
default: 'message',
displayOptions: {
show: {
resource: ['text'],
},
},
},
...message.description,
];


@@ -0,0 +1,338 @@
import type {
IDataObject,
IExecuteFunctions,
INodeExecutionData,
INodeProperties,
} from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import zodToJsonSchema from 'zod-to-json-schema';
import { getConnectedTools } from '@utils/helpers';
import type { GenerateContentResponse, Content, Tool } from '../../helpers/interfaces';
import { apiRequest } from '../../transport';
import { modelRLC } from '../descriptions';
const properties: INodeProperties[] = [
modelRLC('modelSearch'),
{
displayName: 'Messages',
name: 'messages',
type: 'fixedCollection',
typeOptions: {
sortable: true,
multipleValues: true,
},
placeholder: 'Add Message',
default: { values: [{ content: '' }] },
options: [
{
displayName: 'Values',
name: 'values',
values: [
{
displayName: 'Prompt',
name: 'content',
type: 'string',
description: 'The content of the message to be sent',
default: '',
placeholder: 'e.g. Hello, how can you help me?',
typeOptions: {
rows: 2,
},
},
{
displayName: 'Role',
name: 'role',
type: 'options',
description:
"Role in shaping the model's response; it tells the model how it should behave and interact with the user",
options: [
{
name: 'User',
value: 'user',
description: 'Send a message as a user and get a response from the model',
},
{
name: 'Model',
value: 'model',
description: 'Tell the model to adopt a specific tone or personality',
},
],
default: 'user',
},
],
},
],
},
{
displayName: 'Simplify Output',
name: 'simplify',
type: 'boolean',
default: true,
description: 'Whether to return a simplified version of the response instead of the raw data',
},
{
displayName: 'Output Content as JSON',
name: 'jsonOutput',
type: 'boolean',
description: 'Whether to attempt to return the response in JSON format',
default: false,
},
{
displayName: 'Options',
name: 'options',
placeholder: 'Add Option',
type: 'collection',
default: {},
options: [
{
displayName: 'System Message',
name: 'systemMessage',
type: 'string',
default: '',
placeholder: 'e.g. You are a helpful assistant',
},
{
displayName: 'Code Execution',
name: 'codeExecution',
type: 'boolean',
default: false,
description:
'Whether to allow the model to execute code it generates to produce a response. Supported only by certain models.',
},
{
displayName: 'Frequency Penalty',
name: 'frequencyPenalty',
default: 0,
description:
"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
type: 'number',
typeOptions: {
minValue: -2,
maxValue: 2,
numberPrecision: 1,
},
},
{
displayName: 'Maximum Number of Tokens',
name: 'maxOutputTokens',
default: 16,
description: 'The maximum number of tokens to generate in the completion',
type: 'number',
typeOptions: {
minValue: 1,
numberPrecision: 0,
},
},
{
displayName: 'Number of Completions',
name: 'candidateCount',
default: 1,
description: 'How many completions to generate for each prompt',
type: 'number',
typeOptions: {
minValue: 1,
maxValue: 8, // Google Gemini supports up to 8 candidates
numberPrecision: 0,
},
},
{
displayName: 'Presence Penalty',
name: 'presencePenalty',
default: 0,
description:
"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
type: 'number',
typeOptions: {
minValue: -2,
maxValue: 2,
numberPrecision: 1,
},
},
{
displayName: 'Output Randomness (Temperature)',
name: 'temperature',
default: 1,
description:
'Controls the randomness of the output. Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive',
type: 'number',
typeOptions: {
minValue: 0,
maxValue: 2,
numberPrecision: 1,
},
},
{
displayName: 'Output Randomness (Top P)',
name: 'topP',
default: 1,
description: 'The maximum cumulative probability of tokens to consider when sampling',
type: 'number',
typeOptions: {
minValue: 0,
maxValue: 1,
numberPrecision: 1,
},
},
{
displayName: 'Output Randomness (Top K)',
name: 'topK',
default: 1,
description: 'The maximum number of tokens to consider when sampling',
type: 'number',
typeOptions: {
minValue: 1,
numberPrecision: 0,
},
},
{
displayName: 'Max Tool Calls Iterations',
name: 'maxToolsIterations',
type: 'number',
default: 15,
description:
'The maximum number of tool iteration cycles the LLM will run before stopping. A single iteration can contain multiple tool calls. Set to 0 for no limit',
typeOptions: {
minValue: 0,
numberPrecision: 0,
},
},
],
},
];
const displayOptions = {
show: {
operation: ['message'],
resource: ['text'],
},
};
export const description = updateDisplayOptions(displayOptions, properties);
function getToolCalls(response: GenerateContentResponse) {
return response.candidates.flatMap((c) => c.content.parts).filter((p) => 'functionCall' in p);
}
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
const model = this.getNodeParameter('modelId', i, '', { extractValue: true }) as string;
const messages = this.getNodeParameter('messages.values', i, []) as Array<{
content: string;
role: string;
}>;
const simplify = this.getNodeParameter('simplify', i, true) as boolean;
const jsonOutput = this.getNodeParameter('jsonOutput', i, false) as boolean;
const options = this.getNodeParameter('options', i, {});
const generationConfig = {
frequencyPenalty: options.frequencyPenalty,
maxOutputTokens: options.maxOutputTokens,
candidateCount: options.candidateCount,
presencePenalty: options.presencePenalty,
temperature: options.temperature,
topP: options.topP,
topK: options.topK,
responseMimeType: jsonOutput ? 'application/json' : undefined,
};
const availableTools = await getConnectedTools(this, true);
const tools: Tool[] = [
{
functionDeclarations: availableTools.map((t) => ({
name: t.name,
description: t.description,
parameters: {
...zodToJsonSchema(t.schema, { target: 'openApi3' }),
// Google Gemini API throws an error if `additionalProperties` field is present
additionalProperties: undefined,
},
})),
},
];
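// Omit the functionDeclarations entry entirely when no tools are connected.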
if (!tools[0].functionDeclarations?.length) {
tools.pop();
}
if (options.codeExecution) {
tools.push({
codeExecution: {},
});
}
const contents: Content[] = messages.map((m) => ({
parts: [{ text: m.content }],
role: m.role,
}));
const body = {
tools,
contents,
generationConfig,
systemInstruction: options.systemMessage
? { parts: [{ text: options.systemMessage }] }
: undefined,
};
let response = (await apiRequest.call(this, 'POST', `/v1beta/${model}:generateContent`, {
body,
})) as GenerateContentResponse;
const maxToolsIterations = this.getNodeParameter('options.maxToolsIterations', i, 15) as number;
const abortSignal = this.getExecutionCancelSignal();
let currentIteration = 1;
let toolCalls = getToolCalls(response);
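// Tool-calling loop: while the model keeps returning functionCall parts, invoke the matching
// connected tool, append its result as a functionResponse, and re-run the request (bounded by maxToolsIterations).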
while (toolCalls.length) {
if (
(maxToolsIterations > 0 && currentIteration >= maxToolsIterations) ||
abortSignal?.aborted
) {
break;
}
contents.push(...response.candidates.map((c) => c.content));
for (const { functionCall } of toolCalls) {
let toolResponse;
for (const availableTool of availableTools) {
if (availableTool.name === functionCall.name) {
toolResponse = (await availableTool.invoke(functionCall.args)) as IDataObject;
}
}
contents.push({
parts: [
{
functionResponse: {
id: functionCall.id,
name: functionCall.name,
response: {
result: toolResponse,
},
},
},
],
role: 'tool',
});
}
response = (await apiRequest.call(this, 'POST', `/v1beta/${model}:generateContent`, {
body,
})) as GenerateContentResponse;
toolCalls = getToolCalls(response);
currentIteration++;
}
if (simplify) {
return response.candidates.map((candidate) => ({
json: candidate,
pairedItem: { item: i },
}));
}
return [
{
json: { ...response },
pairedItem: { item: i },
},
];
}


@@ -0,0 +1,96 @@
/* eslint-disable n8n-nodes-base/node-filename-against-convention */
import { NodeConnectionTypes, type INodeTypeDescription } from 'n8n-workflow';
import * as audio from './audio';
import * as document from './document';
import * as file from './file';
import * as image from './image';
import * as text from './text';
import * as video from './video';
export const versionDescription: INodeTypeDescription = {
displayName: 'Google Gemini',
name: 'googleGemini',
icon: 'file:gemini.svg',
group: ['transform'],
version: 1,
subtitle: '={{ $parameter["operation"] + ": " + $parameter["resource"] }}',
description: 'Interact with Google Gemini AI models',
defaults: {
name: 'Google Gemini',
},
usableAsTool: true,
codex: {
alias: ['LangChain', 'video', 'document', 'audio', 'transcribe', 'assistant'],
categories: ['AI'],
subcategories: {
AI: ['Agents', 'Miscellaneous', 'Root Nodes'],
},
resources: {
primaryDocumentation: [
{
url: 'https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-langchain.googlegemini/',
},
],
},
},
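// Inputs are computed dynamically: the text/message operation exposes an additional ai_tool input for connected tools.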
inputs: `={{
(() => {
const resource = $parameter.resource;
const operation = $parameter.operation;
if (resource === 'text' && operation === 'message') {
return [{ type: 'main' }, { type: 'ai_tool', displayName: 'Tools' }];
}
return ['main'];
})()
}}`,
outputs: [NodeConnectionTypes.Main],
credentials: [
{
name: 'googlePalmApi',
required: true,
},
],
properties: [
{
displayName: 'Resource',
name: 'resource',
type: 'options',
noDataExpression: true,
options: [
{
name: 'Audio',
value: 'audio',
},
{
name: 'Document',
value: 'document',
},
{
name: 'File',
value: 'file',
},
{
name: 'Image',
value: 'image',
},
{
name: 'Text',
value: 'text',
},
{
name: 'Video',
value: 'video',
},
],
default: 'text',
},
...audio.description,
...document.description,
...file.description,
...image.description,
...text.description,
...video.description,
],
};


@@ -0,0 +1,102 @@
import type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { baseAnalyze } from '../../helpers/baseAnalyze';
import { modelRLC } from '../descriptions';
const properties: INodeProperties[] = [
modelRLC('modelSearch'),
{
displayName: 'Text Input',
name: 'text',
type: 'string',
placeholder: "e.g. What's in this video?",
default: "What's in this video?",
typeOptions: {
rows: 2,
},
},
{
displayName: 'Input Type',
name: 'inputType',
type: 'options',
default: 'url',
options: [
{
name: 'Video URL(s)',
value: 'url',
},
{
name: 'Binary File(s)',
value: 'binary',
},
],
},
{
displayName: 'URL(s)',
name: 'videoUrls',
type: 'string',
placeholder: 'e.g. https://example.com/video.mp4',
description: 'URL(s) of the video(s) to analyze; add multiple URLs separated by commas',
default: '',
displayOptions: {
show: {
inputType: ['url'],
},
},
},
{
displayName: 'Input Data Field Name(s)',
name: 'binaryPropertyName',
type: 'string',
default: 'data',
placeholder: 'e.g. data',
hint: 'The name of the input field containing the binary file data to be processed',
description:
'Name of the binary field(s) which contain the video(s); separate multiple field names with commas',
displayOptions: {
show: {
inputType: ['binary'],
},
},
},
{
displayName: 'Simplify Output',
name: 'simplify',
type: 'boolean',
default: true,
description: 'Whether to simplify the response or not',
},
{
displayName: 'Options',
name: 'options',
placeholder: 'Add Option',
type: 'collection',
default: {},
options: [
{
displayName: 'Length of Description (Max Tokens)',
description: 'Fewer tokens will result in a shorter, less detailed video description',
name: 'maxOutputTokens',
type: 'number',
default: 300,
typeOptions: {
minValue: 1,
},
},
],
},
];
const displayOptions = {
show: {
operation: ['analyze'],
resource: ['video'],
},
};
export const description = updateDisplayOptions(displayOptions, properties);
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
return await baseAnalyze.call(this, i, 'videoUrls', 'video/mp4');
}


@@ -0,0 +1,64 @@
import type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { downloadFile } from '../../helpers/utils';
const properties: INodeProperties[] = [
{
displayName: 'URL',
name: 'url',
type: 'string',
placeholder: 'e.g. https://generativelanguage.googleapis.com/v1beta/files/abcdefg:download',
description: 'The Google Gemini API URL to download the video from',
default: '',
},
{
displayName: 'Options',
name: 'options',
placeholder: 'Add Option',
type: 'collection',
default: {},
options: [
{
displayName: 'Put Output in Field',
name: 'binaryPropertyOutput',
type: 'string',
default: 'data',
hint: 'The name of the output field to put the binary file data in',
},
],
},
];
const displayOptions = {
show: {
operation: ['download'],
resource: ['video'],
},
};
export const description = updateDisplayOptions(displayOptions, properties);
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
const url = this.getNodeParameter('url', i, '') as string;
const binaryPropertyOutput = this.getNodeParameter(
'options.binaryPropertyOutput',
i,
'data',
) as string;
const credentials = await this.getCredentials('googlePalmApi');
const { fileContent, mimeType } = await downloadFile.call(this, url, 'video/mp4', {
key: credentials.apiKey as string,
});
const binaryData = await this.helpers.prepareBinaryData(fileContent, 'video.mp4', mimeType);
return [
{
binary: { [binaryPropertyOutput]: binaryData },
json: {
...binaryData,
data: undefined,
},
pairedItem: { item: i },
},
];
}


@@ -0,0 +1,212 @@
import type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';
import { NodeOperationError, updateDisplayOptions } from 'n8n-workflow';
import type { VeoResponse } from '../../helpers/interfaces';
import { downloadFile } from '../../helpers/utils';
import { apiRequest } from '../../transport';
import { modelRLC } from '../descriptions';
const properties: INodeProperties[] = [
modelRLC('videoGenerationModelSearch'),
{
displayName: 'Prompt',
name: 'prompt',
type: 'string',
placeholder: 'e.g. Panning wide shot of a calico kitten sleeping in the sunshine',
description: 'A text description of the desired video',
default: '',
typeOptions: {
rows: 2,
},
},
{
displayName: 'Return As',
name: 'returnAs',
type: 'options',
options: [
{
name: 'Video',
value: 'video',
},
{
name: 'URL',
value: 'url',
},
],
description:
'Whether to return the video as a binary file or a URL that can be used to download the video later',
default: 'video',
},
{
displayName: 'Options',
name: 'options',
placeholder: 'Add Option',
type: 'collection',
default: {},
options: [
{
displayName: 'Number of Videos',
name: 'sampleCount',
type: 'number',
default: 1,
description: 'How many videos to generate',
typeOptions: {
minValue: 1,
maxValue: 4,
},
},
{
displayName: 'Duration (Seconds)',
name: 'durationSeconds',
type: 'number',
default: 8,
description: 'Length of the generated video in seconds',
typeOptions: {
minValue: 5,
maxValue: 8,
},
},
{
displayName: 'Aspect Ratio',
name: 'aspectRatio',
type: 'options',
options: [
{
name: 'Widescreen (16:9)',
value: '16:9',
description: 'Most common aspect ratio for televisions and monitors',
},
{
name: 'Portrait (9:16)',
value: '9:16',
description: 'Popular for short-form videos like YouTube Shorts',
},
],
default: '16:9',
},
{
displayName: 'Person Generation',
name: 'personGeneration',
type: 'options',
options: [
{
name: "Don't Allow",
value: 'dont_allow',
description: 'Prevent generation of people in the video',
},
{
name: 'Allow Adult',
value: 'allow_adult',
description: 'Allow generation of adult people in the video',
},
{
name: 'Allow All',
value: 'allow_all',
description: 'Allow generation of all people in the video',
},
],
default: 'dont_allow',
},
{
displayName: 'Put Output in Field',
name: 'binaryPropertyOutput',
type: 'string',
default: 'data',
hint: 'The name of the output field to put the binary file data in',
},
],
},
];
const displayOptions = {
show: {
operation: ['generate'],
resource: ['video'],
},
};
export const description = updateDisplayOptions(displayOptions, properties);
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
const model = this.getNodeParameter('modelId', i, '', { extractValue: true }) as string;
const prompt = this.getNodeParameter('prompt', i, '') as string;
const returnAs = this.getNodeParameter('returnAs', i, 'video');
const options = this.getNodeParameter('options', i, {});
const binaryPropertyOutput = this.getNodeParameter(
'options.binaryPropertyOutput',
i,
'data',
) as string;
const credentials = await this.getCredentials('googlePalmApi');
if (!model.includes('veo')) {
throw new NodeOperationError(
this.getNode(),
`Model ${model} is not supported for video generation. Please use a Veo model`,
{
description: 'Video generation is only supported by Veo models',
},
);
}
const body = {
instances: [
{
prompt,
},
],
parameters: {
aspectRatio: options.aspectRatio,
personGeneration: options.personGeneration,
sampleCount: options.sampleCount ?? 1,
durationSeconds: options.durationSeconds ?? 8,
},
};
let response = (await apiRequest.call(this, 'POST', `/v1beta/${model}:predictLongRunning`, {
body,
})) as VeoResponse;
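// predictLongRunning returns a long-running operation; poll it every 5 seconds until it reports done.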
while (!response.done) {
await new Promise((resolve) => setTimeout(resolve, 5000));
response = (await apiRequest.call(this, 'GET', `/v1beta/${response.name}`)) as VeoResponse;
}
if (response.error) {
throw new NodeOperationError(this.getNode(), response.error.message, {
description: 'Error generating video',
});
}
if (returnAs === 'video') {
const promises = response.response.generateVideoResponse.generatedSamples.map(
async (sample) => {
const { fileContent, mimeType } = await downloadFile.call(
this,
sample.video.uri,
'video/mp4',
{
key: credentials.apiKey as string,
},
);
const binaryData = await this.helpers.prepareBinaryData(fileContent, 'video.mp4', mimeType);
return {
binary: { [binaryPropertyOutput]: binaryData },
json: {
...binaryData,
data: undefined,
},
pairedItem: { item: i },
};
},
);
return await Promise.all(promises);
} else {
return response.response.generateVideoResponse.generatedSamples.map((sample) => ({
json: {
url: sample.video.uri,
},
pairedItem: { item: i },
}));
}
}


@@ -0,0 +1,45 @@
import type { INodeProperties } from 'n8n-workflow';
import * as analyze from './analyze.operation';
import * as download from './download.operation';
import * as generate from './generate.operation';
export { analyze, download, generate };
export const description: INodeProperties[] = [
{
displayName: 'Operation',
name: 'operation',
type: 'options',
noDataExpression: true,
options: [
{
name: 'Analyze Video',
value: 'analyze',
action: 'Analyze video',
description: 'Take in videos and answer questions about them',
},
{
name: 'Generate a Video',
value: 'generate',
action: 'Generate a video',
description: 'Creates a video from a text prompt',
},
{
name: 'Download Video',
value: 'download',
action: 'Download a video',
description: 'Download a generated video from the Google Gemini API using a URL',
},
],
default: 'generate',
displayOptions: {
show: {
resource: ['video'],
},
},
},
...analyze.description,
...download.description,
...generate.description,
];

File diff suppressed because one or more lines are too long

New image file added (node icon, 11 KiB)

@@ -0,0 +1,98 @@
import type { IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
import type { Content, GenerateContentResponse } from './interfaces';
import { downloadFile, uploadFile } from './utils';
import { apiRequest } from '../transport';
export async function baseAnalyze(
this: IExecuteFunctions,
i: number,
urlsPropertyName: string,
fallbackMimeType: string,
): Promise<INodeExecutionData[]> {
const model = this.getNodeParameter('modelId', i, '', { extractValue: true }) as string;
const inputType = this.getNodeParameter('inputType', i, 'url') as string;
const text = this.getNodeParameter('text', i, '') as string;
const simplify = this.getNodeParameter('simplify', i, true) as boolean;
const options = this.getNodeParameter('options', i, {});
const generationConfig = {
maxOutputTokens: options.maxOutputTokens,
};
let contents: Content[];
if (inputType === 'url') {
const urls = this.getNodeParameter(urlsPropertyName, i, '') as string;
const filesDataPromises = urls
.split(',')
.map((url) => url.trim())
.filter((url) => url)
.map(async (url) => {
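// URLs already on the Gemini Files API are referenced directly; other URLs are downloaded and re-uploaded first.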
if (url.startsWith('https://generativelanguage.googleapis.com')) {
const { mimeType } = (await apiRequest.call(this, 'GET', '', {
option: { url },
})) as { mimeType: string };
return { fileUri: url, mimeType };
} else {
const { fileContent, mimeType } = await downloadFile.call(this, url, fallbackMimeType);
return await uploadFile.call(this, fileContent, mimeType);
}
});
const filesData = await Promise.all(filesDataPromises);
contents = [
{
role: 'user',
parts: filesData.map((fileData) => ({
fileData,
})),
},
];
} else {
const binaryPropertyNames = this.getNodeParameter('binaryPropertyName', i, 'data');
const promises = binaryPropertyNames
.split(',')
.map((binaryPropertyName) => binaryPropertyName.trim())
.filter((binaryPropertyName) => binaryPropertyName)
.map(async (binaryPropertyName) => {
const binaryData = this.helpers.assertBinaryData(i, binaryPropertyName);
const buffer = await this.helpers.getBinaryDataBuffer(i, binaryPropertyName);
return await uploadFile.call(this, buffer, binaryData.mimeType);
});
const filesData = await Promise.all(promises);
contents = [
{
role: 'user',
parts: filesData.map((fileData) => ({
fileData,
})),
},
];
}
contents[0].parts.push({ text });
const body = {
contents,
generationConfig,
};
const response = (await apiRequest.call(this, 'POST', `/v1beta/${model}:generateContent`, {
body,
})) as GenerateContentResponse;
if (simplify) {
return response.candidates.map((candidate) => ({
json: candidate,
pairedItem: { item: i },
}));
}
return [
{
json: { ...response },
pairedItem: { item: i },
},
];
}


@@ -0,0 +1,74 @@
import type { IDataObject } from 'n8n-workflow';
export interface GenerateContentResponse {
candidates: Array<{
content: Content;
}>;
}
export interface Content {
parts: Part[];
role: string;
}
export type Part =
| { text: string }
| {
inlineData: {
mimeType: string;
data: string;
};
}
| {
functionCall: {
id?: string;
name: string;
args?: IDataObject;
};
}
| {
functionResponse: {
id?: string;
name: string;
response: IDataObject;
};
}
| {
fileData?: {
mimeType?: string;
fileUri?: string;
};
};
export interface ImagenResponse {
predictions: Array<{
bytesBase64Encoded: string;
mimeType: string;
}>;
}
export interface VeoResponse {
name: string;
done: boolean;
error?: {
message: string;
};
response: {
generateVideoResponse: {
generatedSamples: Array<{
video: {
uri: string;
};
}>;
};
};
}
export interface Tool {
functionDeclarations?: Array<{
name: string;
description: string;
parameters: IDataObject;
}>;
codeExecution?: object;
}


@@ -0,0 +1,180 @@
import { mockDeep } from 'jest-mock-extended';
import type { IExecuteFunctions } from 'n8n-workflow';
import { downloadFile, uploadFile } from './utils';
import * as transport from '../transport';
describe('GoogleGemini -> utils', () => {
const mockExecuteFunctions = mockDeep<IExecuteFunctions>();
const apiRequestMock = jest.spyOn(transport, 'apiRequest');
beforeEach(() => {
jest.clearAllMocks();
jest.useFakeTimers({ advanceTimers: true });
});
describe('downloadFile', () => {
it('should download file', async () => {
mockExecuteFunctions.helpers.httpRequest.mockResolvedValue({
body: new ArrayBuffer(10),
headers: {
'content-type': 'application/pdf',
},
});
const file = await downloadFile.call(mockExecuteFunctions, 'https://example.com/file.pdf');
expect(file).toEqual({
fileContent: Buffer.from(new ArrayBuffer(10)),
mimeType: 'application/pdf',
});
expect(mockExecuteFunctions.helpers.httpRequest).toHaveBeenCalledWith({
method: 'GET',
url: 'https://example.com/file.pdf',
returnFullResponse: true,
encoding: 'arraybuffer',
});
});
it('should parse mime type from content type header', async () => {
mockExecuteFunctions.helpers.httpRequest.mockResolvedValue({
body: new ArrayBuffer(10),
headers: {
'content-type': 'application/pdf; q=0.9',
},
});
const file = await downloadFile.call(mockExecuteFunctions, 'https://example.com/file.pdf');
expect(file).toEqual({
fileContent: Buffer.from(new ArrayBuffer(10)),
mimeType: 'application/pdf',
});
});
it('should use fallback mime type if content type header is not present', async () => {
mockExecuteFunctions.helpers.httpRequest.mockResolvedValue({
body: new ArrayBuffer(10),
headers: {},
});
const file = await downloadFile.call(
mockExecuteFunctions,
'https://example.com/file.pdf',
'application/pdf',
);
expect(file).toEqual({
fileContent: Buffer.from(new ArrayBuffer(10)),
mimeType: 'application/pdf',
});
});
});
describe('uploadFile', () => {
it('should upload file', async () => {
const fileContent = Buffer.from(new ArrayBuffer(10));
const mimeType = 'application/pdf';
apiRequestMock.mockResolvedValue({
headers: {
'x-goog-upload-url': 'https://google.com/some-upload-url',
},
});
mockExecuteFunctions.helpers.httpRequest.mockResolvedValue({
file: {
name: 'files/test123',
uri: 'https://google.com/files/test123',
mimeType: 'application/pdf',
state: 'ACTIVE',
},
});
const file = await uploadFile.call(mockExecuteFunctions, fileContent, mimeType);
expect(file).toEqual({
fileUri: 'https://google.com/files/test123',
mimeType: 'application/pdf',
});
expect(apiRequestMock).toHaveBeenCalledWith('POST', '/upload/v1beta/files', {
headers: {
'X-Goog-Upload-Protocol': 'resumable',
'X-Goog-Upload-Command': 'start',
'X-Goog-Upload-Header-Content-Length': '10',
'X-Goog-Upload-Header-Content-Type': 'application/pdf',
'Content-Type': 'application/json',
},
option: {
returnFullResponse: true,
},
});
expect(mockExecuteFunctions.helpers.httpRequest).toHaveBeenCalledWith({
method: 'POST',
url: 'https://google.com/some-upload-url',
headers: {
'Content-Length': '10',
'X-Goog-Upload-Offset': '0',
'X-Goog-Upload-Command': 'upload, finalize',
},
body: fileContent,
});
});
it('should throw error if file upload fails', async () => {
const fileContent = Buffer.from(new ArrayBuffer(10));
const mimeType = 'application/pdf';
apiRequestMock.mockResolvedValue({
headers: {
'x-goog-upload-url': 'https://google.com/some-upload-url',
},
});
mockExecuteFunctions.helpers.httpRequest.mockResolvedValue({
file: {
state: 'FAILED',
error: {
message: 'File upload failed',
},
},
});
await expect(uploadFile.call(mockExecuteFunctions, fileContent, mimeType)).rejects.toThrow(
'File upload failed',
);
});
it('should upload file when it is not immediately active', async () => {
const fileContent = Buffer.from(new ArrayBuffer(10));
const mimeType = 'application/pdf';
apiRequestMock.mockResolvedValueOnce({
headers: {
'x-goog-upload-url': 'https://google.com/some-upload-url',
},
});
mockExecuteFunctions.helpers.httpRequest.mockResolvedValue({
file: {
name: 'files/test123',
uri: 'https://google.com/files/test123',
mimeType: 'application/pdf',
state: 'PENDING',
},
});
apiRequestMock.mockResolvedValueOnce({
name: 'files/test123',
uri: 'https://google.com/files/test123',
mimeType: 'application/pdf',
state: 'ACTIVE',
});
const promise = uploadFile.call(mockExecuteFunctions, fileContent, mimeType);
await jest.advanceTimersByTimeAsync(1000);
const file = await promise;
expect(file).toEqual({
fileUri: 'https://google.com/files/test123',
mimeType: 'application/pdf',
});
expect(apiRequestMock).toHaveBeenCalledWith('GET', '/v1beta/files/test123');
});
});
});


@@ -0,0 +1,84 @@
import type { IDataObject, IExecuteFunctions } from 'n8n-workflow';
import { NodeOperationError } from 'n8n-workflow';
import { apiRequest } from '../transport';
interface File {
name: string;
uri: string;
mimeType: string;
state: string;
error?: { message: string };
}
export async function downloadFile(
this: IExecuteFunctions,
url: string,
fallbackMimeType?: string,
qs?: IDataObject,
) {
const downloadResponse = (await this.helpers.httpRequest({
method: 'GET',
url,
qs,
returnFullResponse: true,
encoding: 'arraybuffer',
})) as { body: ArrayBuffer; headers: IDataObject };
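// Prefer the MIME type from the Content-Type header (dropping parameters such as charset); otherwise fall back to the caller-supplied default.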
const mimeType =
(downloadResponse.headers?.['content-type'] as string)?.split(';')?.[0] ?? fallbackMimeType;
const fileContent = Buffer.from(downloadResponse.body);
return {
fileContent,
mimeType,
};
}
export async function uploadFile(this: IExecuteFunctions, fileContent: Buffer, mimeType: string) {
const numBytes = fileContent.length.toString();
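// Resumable upload: an initial request to /upload/v1beta/files returns an upload URL in the
// x-goog-upload-url response header; the file bytes are then sent to that URL in one upload + finalize step.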
const uploadInitResponse = (await apiRequest.call(this, 'POST', '/upload/v1beta/files', {
headers: {
'X-Goog-Upload-Protocol': 'resumable',
'X-Goog-Upload-Command': 'start',
'X-Goog-Upload-Header-Content-Length': numBytes,
'X-Goog-Upload-Header-Content-Type': mimeType,
'Content-Type': 'application/json',
},
option: {
returnFullResponse: true,
},
})) as { headers: IDataObject };
const uploadUrl = uploadInitResponse.headers['x-goog-upload-url'] as string;
const uploadResponse = (await this.helpers.httpRequest({
method: 'POST',
url: uploadUrl,
headers: {
'Content-Length': numBytes,
'X-Goog-Upload-Offset': '0',
'X-Goog-Upload-Command': 'upload, finalize',
},
body: fileContent,
})) as { file: File };
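// Uploaded files are processed asynchronously; poll the file metadata every second until it becomes ACTIVE (or FAILED).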
while (uploadResponse.file.state !== 'ACTIVE' && uploadResponse.file.state !== 'FAILED') {
await new Promise((resolve) => setTimeout(resolve, 1000));
uploadResponse.file = (await apiRequest.call(
this,
'GET',
`/v1beta/${uploadResponse.file.name}`,
)) as File;
}
if (uploadResponse.file.state === 'FAILED') {
throw new NodeOperationError(
this.getNode(),
uploadResponse.file.error?.message ?? 'Unknown error',
{
description: 'Error uploading file',
},
);
}
return { fileUri: uploadResponse.file.uri, mimeType: uploadResponse.file.mimeType };
}


@@ -0,0 +1 @@
export * as listSearch from './listSearch';


@@ -0,0 +1,150 @@
import { mock } from 'jest-mock-extended';
import type { ILoadOptionsFunctions } from 'n8n-workflow';
import {
audioModelSearch,
imageGenerationModelSearch,
modelSearch,
videoGenerationModelSearch,
} from './listSearch';
import * as transport from '../transport';
const mockResponse = {
models: [
{
name: 'models/gemini-pro-vision',
},
{
name: 'models/gemini-2.5-flash',
},
{
name: 'models/gemini-2.0-flash-exp-image-generation',
},
{
name: 'models/gemini-2.5-pro-preview-tts',
},
{
name: 'models/gemma-3-1b-it',
},
{
name: 'models/embedding-001',
},
{
name: 'models/imagen-3.0-generate-002',
},
{
name: 'models/veo-2.0-generate-001',
},
{
name: 'models/gemini-2.5-flash-preview-native-audio-dialog',
},
],
};
describe('GoogleGemini -> listSearch', () => {
const mockExecuteFunctions = mock<ILoadOptionsFunctions>();
const apiRequestMock = jest.spyOn(transport, 'apiRequest');
beforeEach(() => {
jest.clearAllMocks();
});
describe('modelSearch', () => {
it('should return regular models', async () => {
apiRequestMock.mockResolvedValue(mockResponse);
const result = await modelSearch.call(mockExecuteFunctions);
expect(result).toEqual({
results: [
{
name: 'models/gemini-2.5-flash',
value: 'models/gemini-2.5-flash',
},
{
name: 'models/gemma-3-1b-it',
value: 'models/gemma-3-1b-it',
},
],
});
});
it('should return regular models with filter', async () => {
apiRequestMock.mockResolvedValue(mockResponse);
const result = await modelSearch.call(mockExecuteFunctions, 'Gemma');
expect(result).toEqual({
results: [
{
name: 'models/gemma-3-1b-it',
value: 'models/gemma-3-1b-it',
},
],
});
});
});
describe('audioModelSearch', () => {
it('should return audio models', async () => {
apiRequestMock.mockResolvedValue(mockResponse);
const result = await audioModelSearch.call(mockExecuteFunctions);
expect(result).toEqual({
results: [
{
name: 'models/gemini-2.5-flash',
value: 'models/gemini-2.5-flash',
},
{
name: 'models/gemma-3-1b-it',
value: 'models/gemma-3-1b-it',
},
{
name: 'models/gemini-2.5-flash-preview-native-audio-dialog',
value: 'models/gemini-2.5-flash-preview-native-audio-dialog',
},
],
});
});
});
describe('imageGenerationModelSearch', () => {
it('should return image models', async () => {
apiRequestMock.mockResolvedValue(mockResponse);
const result = await imageGenerationModelSearch.call(mockExecuteFunctions);
expect(result).toEqual({
results: [
{
name: 'models/gemini-2.0-flash-exp-image-generation',
value: 'models/gemini-2.0-flash-exp-image-generation',
},
{
name: 'models/imagen-3.0-generate-002',
value: 'models/imagen-3.0-generate-002',
},
],
});
});
});
describe('videoGenerationModelSearch', () => {
it('should return video models', async () => {
apiRequestMock.mockResolvedValue(mockResponse);
const result = await videoGenerationModelSearch.call(mockExecuteFunctions);
expect(result).toEqual({
results: [
{
name: 'models/veo-2.0-generate-001',
value: 'models/veo-2.0-generate-001',
},
],
});
});
});
});

View File

@@ -0,0 +1,79 @@
import type { ILoadOptionsFunctions, INodeListSearchResult } from 'n8n-workflow';
import { apiRequest } from '../transport';
async function baseModelSearch(
this: ILoadOptionsFunctions,
modelFilter: (model: string) => boolean,
filter?: string,
): Promise<INodeListSearchResult> {
const response = (await apiRequest.call(this, 'GET', '/v1beta/models', {
qs: {
pageSize: 1000,
},
})) as {
models: Array<{ name: string }>;
};
let models = response.models.filter((model) => modelFilter(model.name));
if (filter) {
models = models.filter((model) => model.name.toLowerCase().includes(filter.toLowerCase()));
}
return {
results: models.map((model) => ({ name: model.name, value: model.name })),
};
}
export async function modelSearch(
this: ILoadOptionsFunctions,
filter?: string,
): Promise<INodeListSearchResult> {
return await baseModelSearch.call(
this,
(model) =>
!model.includes('embedding') &&
!model.includes('aqa') &&
!model.includes('image') &&
!model.includes('vision') &&
!model.includes('veo') &&
!model.includes('audio') &&
!model.includes('tts'),
filter,
);
}
export async function audioModelSearch(
this: ILoadOptionsFunctions,
filter?: string,
): Promise<INodeListSearchResult> {
return await baseModelSearch.call(
this,
(model) =>
!model.includes('embedding') &&
!model.includes('aqa') &&
!model.includes('image') &&
!model.includes('vision') &&
!model.includes('veo') &&
!model.includes('tts'), // we don't have a tts operation
filter,
);
}
export async function imageGenerationModelSearch(
this: ILoadOptionsFunctions,
filter?: string,
): Promise<INodeListSearchResult> {
return await baseModelSearch.call(
this,
(model) => model.includes('imagen') || model.includes('image-generation'),
filter,
);
}
export async function videoGenerationModelSearch(
this: ILoadOptionsFunctions,
filter?: string,
): Promise<INodeListSearchResult> {
return await baseModelSearch.call(this, (model) => model.includes('veo'), filter);
}
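If a text-to-speech operation were added later, the same baseModelSearch helper could back its model picker. The following is a hypothetical sketch, not part of this PR, assuming it lived in the same file as baseModelSearch.
// Hypothetical addition, shown only to illustrate how baseModelSearch is meant to be reused
export async function ttsModelSearch(
	this: ILoadOptionsFunctions,
	filter?: string,
): Promise<INodeListSearchResult> {
	// Keep only text-to-speech models such as models/gemini-2.5-pro-preview-tts
	return await baseModelSearch.call(this, (model) => model.includes('tts'), filter);
}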

View File

@@ -0,0 +1,83 @@
import type { IExecuteFunctions } from 'n8n-workflow';
import { mockDeep } from 'jest-mock-extended';
import { apiRequest } from '.';
describe('GoogleGemini transport', () => {
const executeFunctionsMock = mockDeep<IExecuteFunctions>();
beforeEach(() => {
jest.clearAllMocks();
});
it('should call httpRequestWithAuthentication with correct parameters', async () => {
executeFunctionsMock.getCredentials.mockResolvedValue({
url: 'https://custom-url.com',
});
await apiRequest.call(executeFunctionsMock, 'GET', '/v1beta/models', {
headers: {
'Content-Type': 'application/json',
},
body: {
foo: 'bar',
},
qs: {
test: 123,
},
});
expect(executeFunctionsMock.helpers.httpRequestWithAuthentication).toHaveBeenCalledWith(
'googlePalmApi',
{
method: 'GET',
url: 'https://custom-url.com/v1beta/models',
json: true,
body: {
foo: 'bar',
},
qs: {
test: 123,
},
headers: {
'Content-Type': 'application/json',
},
},
);
});
it('should use the default url if no custom url is provided', async () => {
executeFunctionsMock.getCredentials.mockResolvedValue({});
await apiRequest.call(executeFunctionsMock, 'GET', '/v1beta/models');
expect(executeFunctionsMock.helpers.httpRequestWithAuthentication).toHaveBeenCalledWith(
'googlePalmApi',
{
method: 'GET',
url: 'https://generativelanguage.googleapis.com/v1beta/models',
json: true,
},
);
});
it('should override the values with `option`', async () => {
executeFunctionsMock.getCredentials.mockResolvedValue({});
await apiRequest.call(executeFunctionsMock, 'GET', '', {
option: {
url: 'https://custom-url.com',
returnFullResponse: true,
},
});
expect(executeFunctionsMock.helpers.httpRequestWithAuthentication).toHaveBeenCalledWith(
'googlePalmApi',
{
method: 'GET',
url: 'https://custom-url.com',
json: true,
returnFullResponse: true,
},
);
});
});

View File

@@ -0,0 +1,45 @@
import type {
IDataObject,
IExecuteFunctions,
IHttpRequestMethods,
ILoadOptionsFunctions,
} from 'n8n-workflow';
type RequestParameters = {
headers?: IDataObject;
body?: IDataObject | string;
qs?: IDataObject;
option?: IDataObject;
};
export async function apiRequest(
this: IExecuteFunctions | ILoadOptionsFunctions,
method: IHttpRequestMethods,
endpoint: string,
parameters?: RequestParameters,
) {
const { body, qs, option, headers } = parameters ?? {};
const credentials = await this.getCredentials('googlePalmApi');
let url = `https://generativelanguage.googleapis.com${endpoint}`;
if (credentials.url) {
url = `${credentials?.url as string}${endpoint}`;
}
const options = {
headers,
method,
body,
qs,
url,
json: true,
};
if (option && Object.keys(option).length !== 0) {
Object.assign(options, option);
}
return await this.helpers.httpRequestWithAuthentication.call(this, 'googlePalmApi', options);
}
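A sketch of a typical caller (not part of this diff): a text-generation request routed through apiRequest. The model name and prompt are placeholders; the body shape follows the public Gemini generateContent REST endpoint.
import type { IExecuteFunctions } from 'n8n-workflow';
import { apiRequest } from '../transport'; // same import style as the other modules in this diff
async function generateText(this: IExecuteFunctions, prompt: string) {
	// Assumed model name; the endpoint path is /v1beta/{model}:generateContent
	return await apiRequest.call(this, 'POST', '/v1beta/models/gemini-2.5-flash:generateContent', {
		body: {
			contents: [{ role: 'user', parts: [{ text: prompt }] }],
		},
	});
}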

View File

@@ -48,6 +48,7 @@
"dist/credentials/ZepApi.credentials.js" "dist/credentials/ZepApi.credentials.js"
], ],
"nodes": [ "nodes": [
"dist/nodes/vendors/GoogleGemini/GoogleGemini.node.js",
"dist/nodes/vendors/OpenAi/OpenAi.node.js", "dist/nodes/vendors/OpenAi/OpenAi.node.js",
"dist/nodes/agents/Agent/Agent.node.js", "dist/nodes/agents/Agent/Agent.node.js",
"dist/nodes/agents/Agent/AgentTool.node.js", "dist/nodes/agents/Agent/AgentTool.node.js",