n8n-nodes-vercel-ai-sdk-universal-temp 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.md +19 -0
- package/README.md +74 -0
- package/dist/credentials/DeepSeekApi.credentials.d.ts +9 -0
- package/dist/credentials/DeepSeekApi.credentials.js +37 -0
- package/dist/credentials/DeepSeekApi.credentials.js.map +1 -0
- package/dist/credentials/GoogleGenerativeAIApi.credentials.d.ts +9 -0
- package/dist/credentials/GoogleGenerativeAIApi.credentials.js +37 -0
- package/dist/credentials/GoogleGenerativeAIApi.credentials.js.map +1 -0
- package/dist/credentials/GroqApi.credentials.d.ts +9 -0
- package/dist/credentials/GroqApi.credentials.js +37 -0
- package/dist/credentials/GroqApi.credentials.js.map +1 -0
- package/dist/credentials/OpenRouterApi.credentials.d.ts +9 -0
- package/dist/credentials/OpenRouterApi.credentials.js +37 -0
- package/dist/credentials/OpenRouterApi.credentials.js.map +1 -0
- package/dist/nodes/DeepSeek/DeepSeek.node.d.ts +10 -0
- package/dist/nodes/DeepSeek/DeepSeek.node.js +574 -0
- package/dist/nodes/DeepSeek/DeepSeek.node.js.map +1 -0
- package/dist/nodes/DeepSeek/icons/deepseek.svg +1 -0
- package/dist/nodes/GoogleGenerativeAI/GoogleGenerativeAI.node.d.ts +10 -0
- package/dist/nodes/GoogleGenerativeAI/GoogleGenerativeAI.node.js +845 -0
- package/dist/nodes/GoogleGenerativeAI/GoogleGenerativeAI.node.js.map +1 -0
- package/dist/nodes/GoogleGenerativeAI/icons/GoogleGenerativeAI.svg +1 -0
- package/dist/nodes/Groq/Groq.node.d.ts +10 -0
- package/dist/nodes/Groq/Groq.node.js +552 -0
- package/dist/nodes/Groq/Groq.node.js.map +1 -0
- package/dist/nodes/Groq/icons/groq.svg +1 -0
- package/dist/nodes/UniversalAI/UniversalAI.node.d.ts +10 -0
- package/dist/nodes/UniversalAI/UniversalAI.node.js +1273 -0
- package/dist/nodes/UniversalAI/UniversalAI.node.js.map +1 -0
- package/dist/nodes/UniversalAI/icons/UniversalAI.svg +4 -0
- package/dist/nodes/UniversalEmbedding/UniversalEmbedding.node.d.ts +10 -0
- package/dist/nodes/UniversalEmbedding/UniversalEmbedding.node.js +332 -0
- package/dist/nodes/UniversalEmbedding/UniversalEmbedding.node.js.map +1 -0
- package/dist/nodes/UniversalEmbedding/icons/UniversalEmbedding.svg +11 -0
- package/dist/nodes/UniversalImageGen/UniversalImageGen.node.d.ts +10 -0
- package/dist/nodes/UniversalImageGen/UniversalImageGen.node.js +359 -0
- package/dist/nodes/UniversalImageGen/UniversalImageGen.node.js.map +1 -0
- package/dist/nodes/UniversalImageGen/icons/UniversalImageGen.svg +5 -0
- package/dist/package.json +66 -0
- package/dist/tsconfig.tsbuildinfo +1 -0
- package/index.js +0 -0
- package/package.json +64 -0
package/dist/nodes/UniversalAI/UniversalAI.node.js
@@ -0,0 +1,1273 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
	return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.UniversalAI = void 0;
const n8n_workflow_1 = require("n8n-workflow");
const { createGoogleGenerativeAI } = require('@ai-sdk/google');
const { createDeepSeek } = require('@ai-sdk/deepseek');
const { createGroq } = require('@ai-sdk/groq');
const { createOpenRouter } = require('@ai-sdk/openrouter');
const ai_1 = require("ai");
const zod_1 = require("zod");
const ajv_1 = __importDefault(require("ajv"));
async function buildInput(exec, itemIndex) {
	const inputType = exec.getNodeParameter('inputType', itemIndex);
	if (inputType === 'prompt') {
		const promptVal = exec.getNodeParameter('prompt', itemIndex);
		const systemVal = exec.getNodeParameter('system', itemIndex);
		return {
			prompt: promptVal,
			system: systemVal,
		};
	}
	else {
		const messageAsJson = exec.getNodeParameter('messageAsJson', itemIndex, false);
		if (messageAsJson) {
			const rawJson = exec.getNodeParameter('messagesJson', itemIndex);
			let arr;
			try {
				arr = JSON.parse(rawJson);
			}
			catch (error) {
				throw new n8n_workflow_1.NodeOperationError(exec.getNode(), `Invalid JSON in "Messages (JSON)" field: ${error.message}`);
			}
			const parseRes = zod_1.z
				.array(zod_1.z.object({
					role: zod_1.z.enum(['system', 'user', 'assistant']),
					content: zod_1.z.any(),
				}))
				.safeParse(arr);
			if (!parseRes.success) {
				throw new n8n_workflow_1.NodeOperationError(exec.getNode(), 'Messages must be an array of objects with role and content.');
			}
			const messages = parseRes.data.map((m) => ({
				role: m.role,
				content: m.content,
			}));
			return { messages };
		}
		else {
			const items = exec.getInputData();
			const messagesUi = exec.getNodeParameter('messages.messagesUi', itemIndex, []);
			const builtMessages = [];
			for (const msg of messagesUi) {
				const role = msg.role;
				if (role === 'system') {
					builtMessages.push({
						role,
						content: msg.systemContent || '',
					});
					continue;
				}
				if (msg.contentType === 'text') {
					builtMessages.push({
						role,
						content: msg.content || '',
					});
				}
				else {
					const parts = [];
					if (msg.content) {
						parts.push({
							type: 'text',
							text: msg.content,
						});
					}
					let selectedMimeType = msg.mimeType || 'application/octet-stream';
					if (selectedMimeType === 'other' && msg.mimeTypeOther) {
						selectedMimeType = msg.mimeTypeOther;
					}
					if (msg.fileDataSource === 'url') {
						parts.push({
							type: 'file',
							data: msg.fileUrl,
							mimeType: selectedMimeType,
						});
					}
					else {
						const binaryProperty = msg.fileContent || 'data';
						const itemBinary = items[itemIndex].binary;
						if (!itemBinary || !itemBinary[binaryProperty]) {
							throw new n8n_workflow_1.NodeOperationError(exec.getNode(), `Binary property "${binaryProperty}" not found on item index ${itemIndex}`);
						}
						const binaryData = itemBinary[binaryProperty];
						const buffer = Buffer.from(binaryData.data, binaryData.data ? 'base64' : undefined);
						if (selectedMimeType === 'application/octet-stream' && binaryData.mimeType) {
							selectedMimeType = binaryData.mimeType;
						}
						parts.push({
							type: 'file',
							data: buffer,
							mimeType: selectedMimeType,
						});
					}
					builtMessages.push({
						role,
						content: parts,
					});
				}
			}
			return { messages: builtMessages };
		}
	}
}
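// Example (illustration only, not part of the published file): a "Messages (JSON)" value
// that passes the zod check above is an array of { role, content } objects, e.g.
//   [
//     { "role": "system", "content": "You are a helpful assistant." },
//     { "role": "user", "content": "Summarize the attached report." }
//   ]
// The check only constrains `role` to 'system', 'user', or 'assistant'; `content` may be
// a plain string or an array of typed parts such as { type: 'file', data, mimeType },
// matching what the UI branch above builds for file messages.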
function formatTextResult(result, includeRequestBody, provider) {
	var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r;
	let text = result.text;
	let reasoning = result.reasoning;
	if (provider === 'groq' && text.includes('<think>')) {
		const thinkMatch = text.match(/<think>(.*?)<\/think>/s);
		if (thinkMatch) {
			reasoning = thinkMatch[1].trim();
			text = text.replace(/<think>.*?<\/think>\s*/s, '').trim();
		}
	}
	const out = {
		text,
		reasoning,
		toolCalls: result.toolCalls || [],
		toolResults: result.toolResults || [],
		finishReason: result.finishReason,
		usage: {
			promptTokens: (_a = result.usage) === null || _a === void 0 ? void 0 : _a.promptTokens,
			completionTokens: (_b = result.usage) === null || _b === void 0 ? void 0 : _b.completionTokens,
			totalTokens: (_c = result.usage) === null || _c === void 0 ? void 0 : _c.totalTokens,
			...(provider === 'deepseek' && {
				cacheMetrics: {
					promptCacheHitTokens: (_e = (_d = result.experimental_providerMetadata) === null || _d === void 0 ? void 0 : _d.deepseek) === null || _e === void 0 ? void 0 : _e.promptCacheHitTokens,
					promptCacheMissTokens: (_g = (_f = result.experimental_providerMetadata) === null || _f === void 0 ? void 0 : _f.deepseek) === null || _g === void 0 ? void 0 : _g.promptCacheMissTokens,
				},
			}),
			...(provider === 'groq' && {
				cacheMetrics: {
					promptCacheHitTokens: (_j = (_h = result.experimental_providerMetadata) === null || _h === void 0 ? void 0 : _h.groq) === null || _j === void 0 ? void 0 : _j.promptCacheHitTokens,
					promptCacheMissTokens: (_l = (_k = result.experimental_providerMetadata) === null || _k === void 0 ? void 0 : _k.groq) === null || _l === void 0 ? void 0 : _l.promptCacheMissTokens,
				},
			}),
		},
		response: {
			id: (_m = result.response) === null || _m === void 0 ? void 0 : _m.id,
			modelId: (_o = result.response) === null || _o === void 0 ? void 0 : _o.modelId,
			timestamp: (_p = result.response) === null || _p === void 0 ? void 0 : _p.timestamp,
			headers: (_q = result.response) === null || _q === void 0 ? void 0 : _q.headers,
		},
		steps: result.steps || [],
		warnings: result.warnings || [],
		experimental_providerMetadata: result.experimental_providerMetadata,
	};
	if (includeRequestBody) {
		out.request = { body: (_r = result.request) === null || _r === void 0 ? void 0 : _r.body };
	}
	return out;
}
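// Example (illustration only, not part of the published file): for provider 'groq', a raw
// completion of
//   "<think>Compare both options first.</think>The second option is cheaper."
// is split by the regex above into
//   reasoning: "Compare both options first."
//   text:      "The second option is cheaper."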
function formatObjectResult(result, includeRequestBody) {
	var _a, _b, _c, _d, _e, _f, _g, _h;
	const out = {
		object: result.object,
		finishReason: result.finishReason,
		usage: {
			promptTokens: (_a = result.usage) === null || _a === void 0 ? void 0 : _a.promptTokens,
			completionTokens: (_b = result.usage) === null || _b === void 0 ? void 0 : _b.completionTokens,
			totalTokens: (_c = result.usage) === null || _c === void 0 ? void 0 : _c.totalTokens,
		},
		response: {
			id: (_d = result.response) === null || _d === void 0 ? void 0 : _d.id,
			modelId: (_e = result.response) === null || _e === void 0 ? void 0 : _e.modelId,
			timestamp: (_f = result.response) === null || _f === void 0 ? void 0 : _f.timestamp,
			headers: (_g = result.response) === null || _g === void 0 ? void 0 : _g.headers,
		},
		warnings: result.warnings || [],
		experimental_providerMetadata: result.experimental_providerMetadata,
	};
	if (includeRequestBody) {
		out.request = { body: (_h = result.request) === null || _h === void 0 ? void 0 : _h.body };
	}
	return out;
}
class UniversalAI {
	constructor() {
		this.description = {
			displayName: 'Universal AI',
			name: 'universalAi',
			icon: 'file:icons/UniversalAI.svg',
			group: ['transform'],
			version: 1,
			subtitle: '={{$parameter["provider"] + ": " + $parameter["model"]}}',
			description: 'Use multiple AI providers via Vercel AI SDK',
			defaults: {
				name: 'Universal AI',
			},
			inputs: [{ type: "main" }],
			outputs: [{ type: "main" }],
			credentials: [],
			properties: [
				{
					displayName: 'Provider',
					name: 'provider',
					type: 'options',
					required: true,
					noDataExpression: true,
					options: [
						{
							name: 'Google Generative AI',
							value: 'google',
						},
						{
							name: 'DeepSeek',
							value: 'deepseek',
						},
						{
							name: 'Groq',
							value: 'groq',
						},
						{
							name: 'OpenRouter',
							value: 'openrouter',
						},
					],
					default: 'google',
					description: 'Choose which AI provider to use',
				},
				{
					displayName: 'API Key',
					name: 'apiKey',
					type: 'string',
					typeOptions: { password: true },
					default: '',
					required: false,
					description: 'API key for the selected provider. Leave empty to use credentials.',
					displayOptions: {
						show: {
							provider: ['google', 'deepseek', 'groq', 'openrouter'],
						},
					},
				},
				{
					displayName: 'Base URL',
					name: 'baseURL',
					type: 'string',
					default: `={{ $parameter.provider === 'google' ? 'https://generativelanguage.googleapis.com/v1beta' : $parameter.provider === 'deepseek' ? 'https://api.deepseek.com' : $parameter.provider === 'groq' ? 'https://api.groq.com' : $parameter.provider === 'openrouter' ? 'https://openrouter.ai/api/v1' : '' }}`,
					required: false,
					description: 'Base URL for the provider API. Defaults are set automatically based on the selected provider.',
				},
				{
					displayName: 'Operation',
					name: 'operation',
					type: 'options',
					required: true,
					noDataExpression: true,
					options: [
						{
							name: 'Generate Text',
							value: 'generateText',
							description: 'Generate text using simple prompt or chat messages',
							action: 'Generate text',
						},
						{
							name: 'Generate Object',
							value: 'generateObject',
							description: 'Generate a structured object based on a JSON schema',
							action: 'Generate object',
						},
					],
					default: 'generateText',
					description: 'Which type of output you want to generate',
				},
				{
					displayName: 'Model Name or ID',
					name: 'model',
					type: 'options',
					required: true,
					typeOptions: {
						loadOptionsMethod: 'getModels',
						loadOptionsDependsOn: ['provider'],
					},
					default: '',
					description: 'Select which model to use. Choose from the list, or specify an ID using an <a href="https://docs.n8n.io/code-examples/expressions/">expression</a>.',
				},
				{
					displayName: 'Input Type',
					name: 'inputType',
					type: 'options',
					required: true,
					options: [
						{
							name: 'Simple Prompt',
							value: 'prompt',
							description: 'Use a single prompt',
						},
						{
							name: 'Messages',
							value: 'messages',
							description: 'Use a conversation with multiple messages',
						},
					],
					default: 'prompt',
					description: 'Choose how you want to provide input to the model',
				},
				{
					displayName: 'System',
					name: 'system',
					type: 'string',
					typeOptions: {
						rows: 4,
					},
					displayOptions: {
						show: {
							inputType: ['prompt'],
						},
					},
					default: 'You are a helpful assistant.',
					description: "System prompt that specifies the model's behavior",
					hint: "This field is optional, but can help guide the model's responses.",
					requiresDataPath: 'single',
				},
				{
					displayName: 'Prompt',
					name: 'prompt',
					type: 'string',
					typeOptions: {
						rows: 4,
					},
					displayOptions: {
						show: {
							inputType: ['prompt'],
						},
					},
					default: '',
					required: true,
					description: 'The single text prompt to generate a completion for',
					hint: 'You can drag data from previous nodes here using expressions.',
					requiresDataPath: 'single',
				},
				{
					displayName: 'Messages',
					name: 'messages',
					type: 'fixedCollection',
					typeOptions: {
						multipleValues: true,
						sortable: true,
						minValue: 1,
					},
					displayOptions: {
						show: {
							inputType: ['messages'],
							messageAsJson: [false],
						},
					},
					description: 'The messages for the conversation',
					default: {
						messagesUi: [
							{
								role: 'system',
								systemContent: 'You are a helpful assistant.',
							},
							{
								role: 'user',
								contentType: 'text',
								content: 'How can you help me?',
							},
						],
					},
					required: true,
					options: [
						{
							name: 'messagesUi',
							displayName: 'Message',
							values: [
								{
									displayName: 'Role',
									name: 'role',
									type: 'options',
									options: [
										{
											name: 'Assistant',
											value: 'assistant',
										},
										{
											name: 'System',
											value: 'system',
										},
										{
											name: 'User',
											value: 'user',
										},
									],
									default: 'user',
									required: true,
								},
								{
									displayName: 'System Content',
									name: 'systemContent',
									type: 'string',
									description: 'The text content if role is System',
									required: true,
									typeOptions: {
										rows: 4,
									},
									default: '',
									displayOptions: {
										show: {
											role: ['system'],
										},
									},
									requiresDataPath: 'single',
								},
								{
									displayName: 'Content Type',
									name: 'contentType',
									type: 'options',
									options: [
										{
											name: 'Text',
											value: 'text',
										},
										{
											name: 'Binary File',
											value: 'file',
										},
									],
									default: 'text',
									description: 'The type of content to send',
									required: true,
									displayOptions: {
										show: {
											role: ['assistant', 'user'],
										},
									},
								},
								{
									displayName: 'Text Content',
									name: 'content',
									type: 'string',
									typeOptions: {
										rows: 4,
									},
									displayOptions: {
										show: {
											role: ['assistant', 'user'],
											contentType: ['text'],
										},
									},
									default: '',
									description: 'The text content of the message',
									required: true,
									requiresDataPath: 'single',
								},
								{
									displayName: 'File Source',
									name: 'fileDataSource',
									type: 'options',
									options: [
										{
											name: 'Binary',
											value: 'binary',
											description: 'Use a binary property from n8n input',
										},
										{
											name: 'URL',
											value: 'url',
											description: 'Send a URL for the AI to fetch',
										},
									],
									default: 'binary',
									description: 'Where the file is coming from',
									displayOptions: {
										show: {
											role: ['assistant', 'user'],
											contentType: ['file'],
										},
									},
								},
								{
									displayName: 'Binary Property',
									name: 'fileContent',
									type: 'string',
									default: 'data',
									description: 'Name of the binary property containing the file data',
									required: true,
									displayOptions: {
										show: {
											role: ['assistant', 'user'],
											contentType: ['file'],
											fileDataSource: ['binary'],
										},
									},
								},
								{
									displayName: 'File URL',
									name: 'fileUrl',
									type: 'string',
									default: '',
									description: 'URL of the file to download',
									required: true,
									displayOptions: {
										show: {
											role: ['assistant', 'user'],
											contentType: ['file'],
											fileDataSource: ['url'],
										},
									},
									requiresDataPath: 'single',
								},
								{
									displayName: 'MIME Type',
									name: 'mimeType',
									type: 'options',
									default: 'application/octet-stream',
									description: 'Select the MIME type of the file; choose Other to specify a custom MIME type',
									options: [
										{
											name: 'JPEG Image (Image/jpeg)',
											value: 'image/jpeg',
										},
										{
											name: 'JSON (Application/json)',
											value: 'application/json',
										},
										{
											name: 'MP3 Audio (Audio/mpeg)',
											value: 'audio/mpeg',
										},
										{
											name: 'MP4 Video (Video/mp4)',
											value: 'video/mp4',
										},
										{
											name: 'Octet Stream (Default)',
											value: 'application/octet-stream',
										},
										{
											name: 'Other (Specify Below)',
											value: 'other',
										},
										{
											name: 'PDF (Application/pdf)',
											value: 'application/pdf',
										},
										{
											name: 'Plain Text (Text/plain)',
											value: 'text/plain',
										},
										{
											name: 'PNG Image (Image/png)',
											value: 'image/png',
										},
										{
											name: 'WAV Audio (Audio/wav)',
											value: 'audio/wav',
										},
									],
									displayOptions: {
										show: {
											role: ['assistant', 'user'],
											contentType: ['file'],
											fileDataSource: ['url'],
										},
									},
								},
								{
									displayName: 'Other MIME Type',
									name: 'mimeTypeOther',
									type: 'string',
									default: '',
									description: 'Specify a custom MIME type, e.g. application/x-zip-compressed',
									displayOptions: {
										show: {
											role: ['assistant', 'user'],
											contentType: ['file'],
											mimeType: ['other'],
										},
									},
								},
								{
									displayName: 'Additional Text',
									name: 'content',
									type: 'string',
									typeOptions: {
										rows: 2,
									},
									displayOptions: {
										show: {
											role: ['assistant', 'user'],
											contentType: ['file'],
										},
									},
									default: 'Please analyze this file.',
									description: 'Additional text to include with the file',
									required: true,
									requiresDataPath: 'single',
								},
							],
						},
					],
				},
				{
					displayName: 'Messages as JSON',
					name: 'messageAsJson',
					type: 'boolean',
					default: false,
					description: 'Whether to input messages as a JSON array instead of using the UI',
					displayOptions: {
						show: {
							operation: ['generateText', 'generateObject'],
							inputType: ['messages'],
						},
					},
				},
				{
					displayName: 'Messages (JSON)',
					name: 'messagesJson',
					type: 'string',
					default: '=[{"role": "user", "content": "Hello!"}]',
					description: 'Enter an array of message objects in JSON format (role, content)',
					required: true,
					typeOptions: {
						rows: 4,
					},
					noDataExpression: false,
					requiresDataPath: 'single',
					displayOptions: {
						show: {
							operation: ['generateText', 'generateObject'],
							inputType: ['messages'],
							messageAsJson: [true],
						},
					},
				},
				{
					displayName: 'Schema Name',
					name: 'schemaName',
					type: 'string',
					default: '',
					description: 'Name of the output schema (optional)',
					hint: 'Some providers use this name for additional guidance when generating objects.',
					displayOptions: {
						show: {
							operation: ['generateObject'],
						},
					},
				},
				{
					displayName: 'Schema Description',
					name: 'schemaDescription',
					type: 'string',
					default: '',
					description: 'Description of the output schema (optional)',
					hint: 'Some providers use this description for additional guidance when generating objects.',
					displayOptions: {
						show: {
							operation: ['generateObject'],
						},
					},
				},
				{
					displayName: 'Schema',
					name: 'schema',
					type: 'json',
					displayOptions: {
						show: {
							operation: ['generateObject'],
						},
					},
					default: `{\n\t"type": "object",\n\t"properties": {\n\t\t"sentiment": {\n\t\t"type": "string",\n\t\t"enum": ["positive","negative","neutral"],\n\t\t"description": "The overall sentiment of the text"\n\t\t},\n\t\t"score": {\n\t\t"type": "number",\n\t\t"minimum": -1,\n\t\t"maximum": 1,\n\t\t"description": "Sentiment score from -1 (negative) to 1 (positive)"\n\t\t},\n\t\t"text": {\n\t\t"type": "string",\n\t\t"description": "The text content to analyze"\n\t\t}\n\t}\n}`,
					required: true,
					description: 'JSON schema describing the structure and constraints of the object to generate',
					hint: 'For example, a schema describing sentiment analysis output.',
					requiresDataPath: 'single',
				},
				{
					displayName: 'Options',
					name: 'options',
					type: 'collection',
					placeholder: 'Add Option',
					default: {},
					options: [
						{
							displayName: 'Max Tokens',
							name: 'maxTokens',
							type: 'number',
							typeOptions: {
								minValue: 1,
							},
							default: 2048,
							description: 'The maximum number of tokens to generate',
						},
						{
							displayName: 'Temperature',
							name: 'temperature',
							type: 'number',
							typeOptions: {
								minValue: 0,
								maxValue: 2,
								numberPrecision: 2,
							},
							default: 0.7,
							description: 'Higher values produce more random outputs',
						},
						{
							displayName: 'Stop Sequences',
							name: 'stopSequences',
							type: 'string',
							default: '',
							description: 'Sequences where the API will stop generating text. Separate multiple sequences with commas.',
							displayOptions: {
								show: {
									'/provider': ['google', 'openrouter'],
								},
							},
						},
						{
							displayName: 'Include Request Body',
							name: 'includeRequestBody',
							type: 'boolean',
							default: false,
							description: 'Whether to include the request body in the output',
						},
					],
				},
				{
					displayName: 'Safety Settings',
					name: 'safetySettings',
					type: 'fixedCollection',
					typeOptions: {
						multipleValues: true,
					},
					default: {},
					displayOptions: {
						show: {
							provider: ['google'],
						},
					},
					options: [
						{
							name: 'settings',
							displayName: 'Setting',
							values: [
								{
									displayName: 'Category',
									name: 'category',
									type: 'options',
									options: [
										{ name: 'Hate Speech', value: 'HARM_CATEGORY_HATE_SPEECH' },
										{ name: 'Dangerous Content', value: 'HARM_CATEGORY_DANGEROUS_CONTENT' },
										{ name: 'Harassment', value: 'HARM_CATEGORY_HARASSMENT' },
										{ name: 'Sexually Explicit', value: 'HARM_CATEGORY_SEXUALLY_EXPLICIT' },
									],
									default: 'HARM_CATEGORY_HATE_SPEECH',
								},
								{
									displayName: 'Threshold',
									name: 'threshold',
									type: 'options',
									options: [
										{ name: 'Block Low and Above', value: 'BLOCK_LOW_AND_ABOVE' },
										{ name: 'Block Medium and Above', value: 'BLOCK_MEDIUM_AND_ABOVE' },
										{ name: 'Block Only High', value: 'BLOCK_ONLY_HIGH' },
										{ name: 'Block None', value: 'BLOCK_NONE' },
									],
									default: 'BLOCK_MEDIUM_AND_ABOVE',
								},
							],
						},
					],
					description: 'Set safety categories and thresholds to block or filter certain outputs',
				},
				{
					displayName: 'Use Search Grounding',
					name: 'useSearchGrounding',
					type: 'boolean',
					default: false,
					displayOptions: {
						show: {
							provider: ['google'],
						},
					},
					description: 'Whether to enable real-time or up-to-date information if supported by the model',
				},
				{
					displayName: 'Cached Content',
					name: 'cachedContent',
					type: 'string',
					default: '',
					displayOptions: {
						show: {
							provider: ['google'],
						},
					},
					description: 'Name of cached content to use (format: cachedContents/{cachedContent}). Reduces costs for repetitive content.',
				},
				{
					displayName: 'Response Modalities',
					name: 'responseModalities',
					type: 'multiOptions',
					options: [
						{ name: 'Text', value: 'TEXT' },
						{ name: 'Image', value: 'IMAGE' },
					],
					default: [],
					displayOptions: {
						show: {
							provider: ['google'],
						},
					},
					description: 'Output modalities for the response. Leave empty for text-only (default).',
				},
				{
					displayName: 'Thinking Budget',
					name: 'thinkingBudget',
					type: 'number',
					default: 0,
					typeOptions: {
						minValue: 0,
						maxValue: 8192,
					},
					displayOptions: {
						show: {
							provider: ['google'],
						},
					},
					description: 'Number of thinking tokens for reasoning models (Gemini 2.5+). Set to 0 to disable thinking.',
				},
				{
					displayName: 'Include Thoughts',
					name: 'includeThoughts',
					type: 'boolean',
					default: false,
					displayOptions: {
						show: {
							provider: ['google'],
						},
					},
					description: 'Whether to include thought summaries in the response (reasoning insights)',
				},
				{
					displayName: 'Google Tools',
					name: 'googleTools',
					type: 'multiOptions',
					options: [
						{ name: 'Google Search', value: 'google_search' },
						{ name: 'URL Context', value: 'url_context' },
						{ name: 'Code Execution', value: 'code_execution' },
					],
					default: [],
					displayOptions: {
						show: {
							provider: ['google'],
						},
					},
					description: 'Enable Google-specific tools for enhanced capabilities',
				},
				{
					displayName: 'Enable Streaming',
					name: 'enableStreaming',
					type: 'boolean',
					default: false,
					displayOptions: {
						show: {
							operation: ['generateText'],
						},
					},
					description: 'Whether to stream the response in chunks. Output will contain multiple items.',
				},
			],
		};
		this.methods = {
			loadOptions: {
				async getModels() {
					const provider = this.getCurrentNodeParameter('provider');
					try {
						switch (provider) {
							case 'google': {
								const credentials = await this.getCredentials('googleGenerativeAIApi');
								const response = await this.helpers.request({
									method: 'GET',
									url: 'https://generativelanguage.googleapis.com/v1beta/models',
									headers: {
										'x-goog-api-key': credentials.apiKey,
									},
									json: true,
								});
								const returnData = [];
								if (response.models) {
									for (const model of response.models) {
										if (model.name.includes('gemini')) {
											const modelId = model.name.split('/').pop();
											const displayName = model.displayName || modelId;
											const version = modelId.includes('latest')
												? '(Latest)'
												: `(${model.version || 'v1'})`;
											returnData.push({
												name: `${displayName} ${version}`,
												value: modelId,
												description: model.description || '',
											});
										}
									}
								}
								return returnData.sort((a, b) => a.name.localeCompare(b.name));
							}
							case 'deepseek': {
								return [
									{
										name: 'DeepSeek Chat',
										value: 'deepseek-chat',
										description: 'DeepSeek chat model',
									},
									{
										name: 'DeepSeek Coder',
										value: 'deepseek-coder',
										description: 'DeepSeek coding model',
									},
								];
							}
							case 'groq': {
								return [
									{
										name: 'Llama 3.1 8B',
										value: 'llama-3.1-8b-instant',
										description: 'Fast and efficient 8B model',
									},
									{
										name: 'Llama 3.1 70B',
										value: 'llama-3.1-70b-versatile',
										description: 'High-performance 70B model',
									},
									{
										name: 'Llama 3.1 405B',
										value: 'llama-3.1-405b-instruct',
										description: 'Most capable Llama model',
									},
									{
										name: 'Mixtral 8x7B',
										value: 'mixtral-8x7b-32768',
										description: 'Mixture of experts model',
									},
									{
										name: 'Gemma 7B',
										value: 'gemma-7b-it',
										description: 'Google Gemma model',
									},
								];
							}
							case 'openrouter': {
								try {
									const credentials = await this.getCredentials('openRouterApi');
									const response = await this.helpers.request({
										method: 'GET',
										url: 'https://openrouter.ai/api/v1/models',
										headers: {
											Authorization: `Bearer ${credentials.apiKey}`,
										},
										json: true,
									});
									const returnData = [];
									if (response.data) {
										for (const model of response.data) {
											returnData.push({
												name: model.name || model.id,
												value: model.id,
												description: model.description || '',
											});
										}
									}
									return returnData.sort((a, b) => a.name.localeCompare(b.name));
								}
								catch (error) {
									return [
										{
											name: 'GPT-4o',
											value: 'openai/gpt-4o',
											description: 'OpenAI GPT-4o via OpenRouter',
										},
										{
											name: 'GPT-4o Mini',
											value: 'openai/gpt-4o-mini',
											description: 'OpenAI GPT-4o Mini via OpenRouter',
										},
										{
											name: 'Claude 3.5 Sonnet',
											value: 'anthropic/claude-3.5-sonnet',
											description: 'Anthropic Claude 3.5 Sonnet via OpenRouter',
										},
										{
											name: 'Gemini 1.5 Pro',
											value: 'google/gemini-pro-1.5',
											description: 'Google Gemini 1.5 Pro via OpenRouter',
										},
									];
								}
							}
							default:
								return [];
						}
					}
					catch (error) {
						switch (provider) {
							case 'google':
								return [
									{
										name: 'Gemini 1.5 Pro (Latest)',
										value: 'gemini-1.5-pro-latest',
										description: 'Most capable Gemini model for text generation',
									},
									{
										name: 'Gemini 1.5 Flash (Latest)',
										value: 'gemini-1.5-flash-latest',
										description: 'Optimized for speed while maintaining high quality',
									},
								];
							case 'deepseek':
								return [
									{
										name: 'DeepSeek Chat',
										value: 'deepseek-chat',
										description: 'DeepSeek chat model',
									},
								];
							case 'groq':
								return [
									{
										name: 'Llama 3.1 8B',
										value: 'llama-3.1-8b-instant',
										description: 'Fast and efficient 8B model',
									},
								];
							case 'openrouter':
								return [
									{
										name: 'GPT-4o',
										value: 'openai/gpt-4o',
										description: 'OpenAI GPT-4o via OpenRouter',
									},
									{
										name: 'Claude 3.5 Sonnet',
										value: 'anthropic/claude-3.5-sonnet',
										description: 'Anthropic Claude 3.5 Sonnet via OpenRouter',
									},
								];
							default:
								return [];
						}
					}
				},
			},
		};
	}
	async execute() {
		const items = this.getInputData();
		const returnData = [];
		const provider = this.getNodeParameter('provider', 0);
		const apiKey = this.getNodeParameter('apiKey', 0, '');
		const baseURL = this.getNodeParameter('baseURL', 0, '');
		let credentials = null;
		if (!apiKey) {
			switch (provider) {
				case 'google':
					credentials = await this.getCredentials('googleGenerativeAIApi');
					break;
				case 'deepseek':
					credentials = await this.getCredentials('deepSeekApi');
					break;
				case 'groq':
					credentials = await this.getCredentials('groqApi');
					break;
				case 'openrouter':
					credentials = await this.getCredentials('openRouterApi');
					break;
			}
			if (!(credentials === null || credentials === void 0 ? void 0 : credentials.apiKey)) {
				throw new n8n_workflow_1.NodeOperationError(this.getNode(), 'No API key provided in credentials');
			}
		}
		const actualApiKey = apiKey || credentials.apiKey;
		let aiProvider;
		switch (provider) {
			case 'google':
				aiProvider = createGoogleGenerativeAI({
					apiKey: actualApiKey,
					baseURL: baseURL || 'https://generativelanguage.googleapis.com/v1beta',
					headers: {
						'x-goog-api-key': actualApiKey,
					},
				});
				break;
			case 'deepseek':
				aiProvider = createDeepSeek({
					apiKey: actualApiKey,
					baseURL: baseURL || 'https://api.deepseek.com',
				});
				break;
			case 'groq':
				aiProvider = createGroq({
					apiKey: actualApiKey,
					baseURL: baseURL || 'https://api.groq.com',
				});
				break;
			case 'openrouter':
				aiProvider = createOpenRouter({
					apiKey: actualApiKey,
					baseURL: baseURL || 'https://openrouter.ai/api/v1',
				});
				break;
		}
		for (let i = 0; i < items.length; i++) {
			try {
				const operation = this.getNodeParameter('operation', i);
				const model = this.getNodeParameter('model', i);
				const options = this.getNodeParameter('options', i, {});
				let modelSettings = {};
				let stopSequences;
				if (options.stopSequences && (provider === 'google' || provider === 'openrouter')) {
					stopSequences = options.stopSequences.split(',').map(s => s.trim()).filter(s => s.length > 0);
				}
				if (provider === 'google') {
					const safetySettingsRaw = this.getNodeParameter('safetySettings.settings', i, []);
					const useSearchGrounding = this.getNodeParameter('useSearchGrounding', i, false);
					const cachedContent = this.getNodeParameter('cachedContent', i, '');
					const responseModalities = this.getNodeParameter('responseModalities', i, []);
					const thinkingBudget = this.getNodeParameter('thinkingBudget', i, 0);
					const includeThoughts = this.getNodeParameter('includeThoughts', i, false);
					const safetySettings = safetySettingsRaw.map((s) => ({
						category: s.category,
						threshold: s.threshold,
					}));
					modelSettings = {
						structuredOutputs: operation === 'generateObject',
						safetySettings: safetySettings.length > 0 ? safetySettings : undefined,
						useSearchGrounding,
						...(cachedContent && { cachedContent }),
						...(responseModalities.length > 0 && { responseModalities }),
						...(thinkingBudget > 0 && {
							thinkingConfig: {
								thinkingBudget,
								includeThoughts,
							},
						}),
					};
				}
				else if (provider === 'openrouter') {
					modelSettings = {};
				}
				const input = await buildInput(this, i);
				let tools = undefined;
				if (provider === 'google') {
					const googleTools = this.getNodeParameter('googleTools', i, []);
					if (googleTools.length > 0) {
						tools = {};
						const google = require('@ai-sdk/google').google;
						if (googleTools.includes('google_search')) {
							tools.google_search = google.tools.googleSearch({});
						}
						if (googleTools.includes('url_context')) {
							tools.url_context = google.tools.urlContext({});
						}
						if (googleTools.includes('code_execution')) {
							tools.code_execution = google.tools.codeExecution({});
						}
					}
				}
				const enableStreaming = this.getNodeParameter('enableStreaming', i, false);
				if (operation === 'generateText') {
					const generateTextParams = {
						model: aiProvider(model, modelSettings),
						messages: input.messages,
						maxTokens: options.maxTokens,
						temperature: options.temperature,
						prompt: input.prompt,
						system: input.system,
						...(tools && { tools }),
					};
					if (stopSequences && (provider === 'google' || provider === 'openrouter')) {
						generateTextParams.stopSequences = stopSequences;
					}
					if (enableStreaming) {
						const stream = await (0, ai_1.streamText)(generateTextParams);
						let fullText = '';
						const chunks = [];
						for await (const textPart of stream.textStream) {
							fullText += textPart;
							chunks.push(textPart);
						}
						const finalUsage = await stream.usage;
						for (const chunk of chunks) {
							returnData.push({
								json: {
									chunk,
									isStreaming: true,
								}
							});
						}
						returnData.push({
							json: {
								text: fullText,
								toolCalls: stream.toolCalls || [],
								toolResults: stream.toolResults || [],
								finishReason: stream.finishReason,
								usage: {
									promptTokens: finalUsage.promptTokens,
									completionTokens: finalUsage.completionTokens,
									totalTokens: finalUsage.totalTokens,
								},
								isStreaming: false,
								isFinal: true,
							}
						});
					}
					else {
						const result = await (0, ai_1.generateText)(generateTextParams);
						const formatted = formatTextResult(result, options.includeRequestBody, provider);
						if (tools && result.toolCalls) {
							formatted.toolCalls = result.toolCalls;
							formatted.toolResults = result.toolResults;
						}
						returnData.push({ json: formatted });
					}
				}
				else {
					const schemaName = this.getNodeParameter('schemaName', i, '');
					const schemaDescription = this.getNodeParameter('schemaDescription', i, '');
					const rawSchema = this.getNodeParameter('schema', i);
					let parsedSchema;
					try {
						parsedSchema = JSON.parse(rawSchema);
					}
					catch (err) {
						throw new n8n_workflow_1.NodeOperationError(this.getNode(), 'Schema is not valid JSON: ' + err.message);
					}
					const ajv = new ajv_1.default();
					if (!ajv.validateSchema(parsedSchema)) {
						throw new n8n_workflow_1.NodeOperationError(this.getNode(), `Invalid JSON Schema: ${ajv.errorsText(ajv.errors)}`);
					}
					const result = await (0, ai_1.generateObject)({
						model: aiProvider(model, modelSettings),
						schema: (0, ai_1.jsonSchema)(parsedSchema),
						schemaName,
						schemaDescription,
						prompt: input.prompt,
						system: input.system,
						messages: input.messages,
						maxTokens: options.maxTokens,
						temperature: options.temperature,
						...(stopSequences && (provider === 'google' || provider === 'openrouter') && { stopSequences }),
					});
					const formatted = formatObjectResult(result, options.includeRequestBody);
					returnData.push({ json: formatted });
				}
			}
			catch (error) {
				if (this.continueOnFail()) {
					returnData.push({ json: { error: error.message } });
				}
				else {
					throw new n8n_workflow_1.NodeOperationError(this.getNode(), error, { itemIndex: i });
				}
			}
		}
		return [returnData];
	}
}
exports.UniversalAI = UniversalAI;
//# sourceMappingURL=UniversalAI.node.js.map
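For orientation, the execute() method above boils down to the standard Vercel AI SDK call pattern. A minimal standalone sketch, assuming the same ai and @ai-sdk/groq dependencies this package declares; the GROQ_API_KEY environment variable and the model ID are placeholders (the node resolves these from its credentials and the "Model Name or ID" parameter at run time):

    const { generateText } = require('ai');
    const { createGroq } = require('@ai-sdk/groq');

    // Provider factory, mirroring the createGroq() call in the node's execute() method.
    const groq = createGroq({
        apiKey: process.env.GROQ_API_KEY,
        baseURL: 'https://api.groq.com',
    });

    async function main() {
        // Same parameter shape the node passes to generateText().
        const result = await generateText({
            model: groq('llama-3.1-8b-instant'),
            system: 'You are a helpful assistant.',
            prompt: 'How can you help me?',
            maxTokens: 2048,
            temperature: 0.7,
        });
        console.log(result.text, result.usage);
    }

    main();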