feat(Google Gemini Chat Model Node): Add support for new Google Gemini models (#9130)

Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>
Co-authored-by: Michael Kret <michael.k@radency.com>
Authored by oleg on 2024-04-15 13:56:44 +02:00, committed by GitHub
parent fa93fb81b0
commit f1215cdb6b
11 changed files with 483 additions and 14 deletions


@@ -0,0 +1,233 @@
/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import {
NodeConnectionType,
type IExecuteFunctions,
type INodeType,
type INodeTypeDescription,
type SupplyData,
} from 'n8n-workflow';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import type { HarmBlockThreshold, HarmCategory, SafetySetting } from '@google/generative-ai';
import { logWrapper } from '../../../utils/logWrapper';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { harmCategories, harmThresholds } from './options';
export class LmChatGoogleGemini implements INodeType {
description: INodeTypeDescription = {
displayName: 'Google Gemini Chat Model',
// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
name: 'lmChatGoogleGemini',
icon: 'file:google.svg',
group: ['transform'],
version: 1,
description: 'Chat Model Google Gemini',
defaults: {
name: 'Google Gemini Chat Model',
},
codex: {
categories: ['AI'],
subcategories: {
AI: ['Language Models'],
},
resources: {
primaryDocumentation: [
{
url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglegemini/',
},
],
},
},
// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
inputs: [],
// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
outputs: [NodeConnectionType.AiLanguageModel],
outputNames: ['Model'],
credentials: [
{
name: 'googlePalmApi',
required: true,
},
],
requestDefaults: {
ignoreHttpStatusErrors: true,
baseURL: '={{ $credentials.host }}',
},
properties: [
getConnectionHintNoticeField([NodeConnectionType.AiChain, NodeConnectionType.AiAgent]),
{
displayName: 'Model',
name: 'modelName',
type: 'options',
description:
'The model which will generate the completion. <a href="https://developers.generativeai.google/api/rest/generativelanguage/models/list">Learn more</a>.',
typeOptions: {
loadOptions: {
routing: {
request: {
method: 'GET',
url: '/v1beta/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'models',
},
},
{
type: 'filter',
properties: {
pass: "={{ !$responseItem.name.includes('embedding') }}",
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.name}}',
value: '={{$responseItem.name}}',
description: '={{$responseItem.description}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
},
routing: {
send: {
type: 'body',
property: 'model',
},
},
default: 'models/gemini-1.0-pro',
},
{
displayName: 'Options',
name: 'options',
placeholder: 'Add Option',
description: 'Additional options to add',
type: 'collection',
default: {},
options: [
{
displayName: 'Maximum Number of Tokens',
name: 'maxOutputTokens',
default: 2048,
description: 'The maximum number of tokens to generate in the completion',
type: 'number',
},
{
displayName: 'Sampling Temperature',
name: 'temperature',
default: 0.4,
typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
description:
'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
type: 'number',
},
{
displayName: 'Top K',
name: 'topK',
default: 32,
typeOptions: { maxValue: 40, minValue: -1, numberPrecision: 1 },
description:
'Used to remove "long tail" low probability responses by limiting sampling to the K most likely tokens. Set to -1 to disable.',
type: 'number',
},
{
displayName: 'Top P',
name: 'topP',
default: 1,
typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
description:
'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
type: 'number',
},
// Safety Settings
{
displayName: 'Safety Settings',
name: 'safetySettings',
type: 'fixedCollection',
typeOptions: { multipleValues: true },
default: {
values: {
category: harmCategories[0].name as HarmCategory,
threshold: harmThresholds[0].name as HarmBlockThreshold,
},
},
placeholder: 'Add Option',
options: [
{
name: 'values',
displayName: 'Values',
values: [
{
displayName: 'Safety Category',
name: 'category',
type: 'options',
description: 'The category of harmful content to block',
default: 'HARM_CATEGORY_UNSPECIFIED',
options: harmCategories,
},
{
displayName: 'Safety Threshold',
name: 'threshold',
type: 'options',
description: 'The threshold of harmful content to block',
default: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
options: harmThresholds,
},
],
},
],
},
],
},
],
};
async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
const credentials = await this.getCredentials('googlePalmApi');
const modelName = this.getNodeParameter('modelName', itemIndex) as string;
const options = this.getNodeParameter('options', itemIndex, {
maxOutputTokens: 1024,
temperature: 0.7,
topK: 40,
topP: 0.9,
}) as {
maxOutputTokens: number;
temperature: number;
topK: number;
topP: number;
};
const safetySettings = this.getNodeParameter(
'options.safetySettings.values',
itemIndex,
null,
) as SafetySetting[];
const model = new ChatGoogleGenerativeAI({
apiKey: credentials.apiKey as string,
modelName,
topK: options.topK,
topP: options.topP,
temperature: options.temperature,
maxOutputTokens: options.maxOutputTokens,
safetySettings,
});
return {
response: logWrapper(model, this),
};
}
}
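
For context (not part of this commit), a minimal standalone sketch of what supplyData wires together, assuming @langchain/google-genai and @google/generative-ai are installed and an API key is available in the GOOGLE_API_KEY environment variable; the model name and option values below are illustrative only:

import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai';

async function main() {
	const model = new ChatGoogleGenerativeAI({
		apiKey: process.env.GOOGLE_API_KEY ?? '', // assumption: key supplied via environment
		modelName: 'models/gemini-1.0-pro',
		temperature: 0.4,
		maxOutputTokens: 2048,
		topK: 32,
		topP: 1,
		safetySettings: [
			{
				category: HarmCategory.HARM_CATEGORY_HARASSMENT,
				threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
			},
		],
	});

	const response = await model.invoke('Say hello in one short sentence.');
	console.log(response.content);
}

void main();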


@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 48 48"><defs><path id="a" d="M44.5 20H24v8.5h11.8C34.7 33.9 30.1 37 24 37c-7.2 0-13-5.8-13-13s5.8-13 13-13c3.1 0 5.9 1.1 8.1 2.9l6.4-6.4C34.6 4.1 29.6 2 24 2 11.8 2 2 11.8 2 24s9.8 22 22 22c11 0 21-8 21-22 0-1.3-.2-2.7-.5-4"/></defs><clipPath id="b"><use xlink:href="#a" overflow="visible"/></clipPath><path fill="#FBBC05" d="M0 37V11l17 13z" clip-path="url(#b)"/><path fill="#EA4335" d="m0 11 17 13 7-6.1L48 14V0H0z" clip-path="url(#b)"/><path fill="#34A853" d="m0 37 30-23 7.9 1L48 0v48H0z" clip-path="url(#b)"/><path fill="#4285F4" d="M48 48 17 24l-4-3 35-10z" clip-path="url(#b)"/></svg>



@@ -0,0 +1,52 @@
import type { INodePropertyOptions } from 'n8n-workflow';
export const harmCategories: INodePropertyOptions[] = [
{
value: 'HARM_CATEGORY_HARASSMENT',
name: 'HARM_CATEGORY_HARASSMENT',
description: 'Harassment content',
},
{
value: 'HARM_CATEGORY_HATE_SPEECH',
name: 'HARM_CATEGORY_HATE_SPEECH',
description: 'Hate speech and content',
},
{
value: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
name: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
description: 'Sexually explicit content',
},
{
value: 'HARM_CATEGORY_DANGEROUS_CONTENT',
name: 'HARM_CATEGORY_DANGEROUS_CONTENT',
description: 'Dangerous content',
},
];
export const harmThresholds: INodePropertyOptions[] = [
{
value: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
name: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
description: 'Threshold is unspecified',
},
{
value: 'BLOCK_LOW_AND_ABOVE',
name: 'BLOCK_LOW_AND_ABOVE',
description: 'Content with NEGLIGIBLE will be allowed',
},
{
value: 'BLOCK_MEDIUM_AND_ABOVE',
name: 'BLOCK_MEDIUM_AND_ABOVE',
description: 'Content with NEGLIGIBLE and LOW will be allowed',
},
{
value: 'BLOCK_ONLY_HIGH',
name: 'BLOCK_ONLY_HIGH',
description: 'Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed',
},
{
value: 'BLOCK_NONE',
name: 'BLOCK_NONE',
description: 'All content will be allowed',
},
];
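
These option lists mirror the HarmCategory and HarmBlockThreshold enums exposed by @google/generative-ai. As a hedged sketch (the helper below is hypothetical and not part of this commit), this is how a selected category/threshold pair maps onto the SafetySetting shape the node ultimately passes to ChatGoogleGenerativeAI:

import type { HarmBlockThreshold, HarmCategory, SafetySetting } from '@google/generative-ai';

// Hypothetical helper: convert the node's string option values into a SafetySetting object.
function toSafetySetting(category: string, threshold: string): SafetySetting {
	return {
		category: category as HarmCategory,
		threshold: threshold as HarmBlockThreshold,
	};
}

// Example: block only high-probability hate speech, using values from the lists above.
const safetySettings: SafetySetting[] = [
	toSafetySetting('HARM_CATEGORY_HATE_SPEECH', 'BLOCK_ONLY_HIGH'),
];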