@alpic80/rivet-core 1.19.1-aidon.2 → 1.24.0-aidon.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. package/README.md +4 -0
  2. package/dist/cjs/bundle.cjs +4187 -1020
  3. package/dist/cjs/bundle.cjs.map +4 -4
  4. package/dist/esm/api/createProcessor.js +8 -17
  5. package/dist/esm/api/looseDataValue.js +16 -0
  6. package/dist/esm/exports.js +2 -0
  7. package/dist/esm/integrations/CodeRunner.js +36 -0
  8. package/dist/esm/integrations/GptTokenizerTokenizer.js +7 -4
  9. package/dist/esm/integrations/openai/OpenAIEmbeddingGenerator.js +1 -1
  10. package/dist/esm/model/DataValue.js +14 -2
  11. package/dist/esm/model/GraphProcessor.js +275 -104
  12. package/dist/esm/model/NodeBase.js +11 -1
  13. package/dist/esm/model/NodeImpl.js +8 -0
  14. package/dist/esm/model/Nodes.js +31 -4
  15. package/dist/esm/model/ProjectReferenceLoader.js +1 -0
  16. package/dist/esm/model/nodes/AssembleMessageNode.js +12 -2
  17. package/dist/esm/model/nodes/AssemblePromptNode.js +22 -0
  18. package/dist/esm/model/nodes/CallGraphNode.js +3 -4
  19. package/dist/esm/model/nodes/ChatLoopNode.js +150 -0
  20. package/dist/esm/model/nodes/ChatNode.js +7 -934
  21. package/dist/esm/model/nodes/ChatNodeBase.js +1275 -0
  22. package/dist/esm/model/nodes/ChunkNode.js +2 -2
  23. package/dist/esm/model/nodes/CodeNode.js +40 -4
  24. package/dist/esm/model/nodes/CronNode.js +248 -0
  25. package/dist/esm/model/nodes/DelegateFunctionCallNode.js +37 -12
  26. package/dist/esm/model/nodes/DestructureNode.js +1 -1
  27. package/dist/esm/model/nodes/DocumentNode.js +183 -0
  28. package/dist/esm/model/nodes/ExtractJsonNode.js +4 -4
  29. package/dist/esm/model/nodes/ExtractRegexNode.js +10 -11
  30. package/dist/esm/model/nodes/GetEmbeddingNode.js +1 -1
  31. package/dist/esm/model/nodes/HttpCallNode.js +3 -1
  32. package/dist/esm/model/nodes/IfNode.js +5 -0
  33. package/dist/esm/model/nodes/ImageToMDNode.js +116 -0
  34. package/dist/esm/model/nodes/LoopUntilNode.js +214 -0
  35. package/dist/esm/model/nodes/PromptNode.js +29 -6
  36. package/dist/esm/model/nodes/ReadAllFilesNode.js +210 -0
  37. package/dist/esm/model/nodes/ReadDirectoryNode.js +31 -25
  38. package/dist/esm/model/nodes/ReferencedGraphAliasNode.js +199 -0
  39. package/dist/esm/model/nodes/TextNode.js +9 -4
  40. package/dist/esm/model/nodes/ToMarkdownTableNode.js +119 -0
  41. package/dist/esm/model/nodes/ToTreeNode.js +133 -0
  42. package/dist/esm/model/nodes/{GptFunctionNode.js → ToolNode.js} +10 -10
  43. package/dist/esm/model/nodes/UserInputNode.js +10 -12
  44. package/dist/esm/plugins/aidon/nodes/ChatAidonNode.js +3 -3
  45. package/dist/esm/plugins/anthropic/anthropic.js +29 -10
  46. package/dist/esm/plugins/anthropic/fetchEventSource.js +3 -2
  47. package/dist/esm/plugins/anthropic/nodes/ChatAnthropicNode.js +267 -147
  48. package/dist/esm/plugins/anthropic/plugin.js +9 -1
  49. package/dist/esm/plugins/gentrace/plugin.js +6 -6
  50. package/dist/esm/plugins/google/google.js +113 -5
  51. package/dist/esm/plugins/google/nodes/ChatGoogleNode.js +211 -54
  52. package/dist/esm/plugins/google/plugin.js +13 -6
  53. package/dist/esm/plugins/openai/nodes/RunThreadNode.js +2 -2
  54. package/dist/esm/recording/ExecutionRecorder.js +5 -1
  55. package/dist/esm/utils/chatMessageToOpenAIChatCompletionMessage.js +15 -2
  56. package/dist/esm/utils/coerceType.js +1 -1
  57. package/dist/esm/utils/fetchEventSource.js +1 -1
  58. package/dist/esm/utils/interpolation.js +108 -3
  59. package/dist/esm/utils/openai.js +106 -50
  60. package/dist/esm/utils/paths.js +80 -0
  61. package/dist/esm/utils/serialization/serialization_v4.js +5 -0
  62. package/dist/types/api/createProcessor.d.ts +11 -5
  63. package/dist/types/api/looseDataValue.d.ts +4 -0
  64. package/dist/types/api/streaming.d.ts +1 -1
  65. package/dist/types/exports.d.ts +2 -0
  66. package/dist/types/integrations/CodeRunner.d.ts +18 -0
  67. package/dist/types/model/DataValue.d.ts +29 -6
  68. package/dist/types/model/EditorDefinition.d.ts +6 -1
  69. package/dist/types/model/GraphProcessor.d.ts +14 -7
  70. package/dist/types/model/NodeBase.d.ts +4 -0
  71. package/dist/types/model/NodeImpl.d.ts +5 -4
  72. package/dist/types/model/Nodes.d.ts +13 -4
  73. package/dist/types/model/ProcessContext.d.ts +16 -1
  74. package/dist/types/model/Project.d.ts +19 -7
  75. package/dist/types/model/ProjectReferenceLoader.d.ts +5 -0
  76. package/dist/types/model/RivetPlugin.d.ts +6 -0
  77. package/dist/types/model/RivetUIContext.d.ts +5 -1
  78. package/dist/types/model/Settings.d.ts +1 -0
  79. package/dist/types/model/nodes/AssemblePromptNode.d.ts +4 -1
  80. package/dist/types/model/nodes/ChatLoopNode.d.ts +21 -0
  81. package/dist/types/model/nodes/ChatNode.d.ts +2 -62
  82. package/dist/types/model/nodes/ChatNodeBase.d.ts +85 -0
  83. package/dist/types/model/nodes/CodeNode.d.ts +8 -2
  84. package/dist/types/model/nodes/CronNode.d.ts +34 -0
  85. package/dist/types/model/nodes/DelegateFunctionCallNode.d.ts +1 -0
  86. package/dist/types/model/nodes/DocumentNode.d.ts +28 -0
  87. package/dist/types/model/nodes/ImageToMDNode.d.ts +20 -0
  88. package/dist/types/model/nodes/LoopUntilNode.d.ts +32 -0
  89. package/dist/types/model/nodes/PromptNode.d.ts +2 -0
  90. package/dist/types/model/nodes/ReadAllFilesNode.d.ts +30 -0
  91. package/dist/types/model/nodes/ReadDirectoryNode.d.ts +1 -1
  92. package/dist/types/model/nodes/ReferencedGraphAliasNode.d.ts +31 -0
  93. package/dist/types/model/nodes/ToMarkdownTableNode.d.ts +19 -0
  94. package/dist/types/model/nodes/ToTreeNode.d.ts +21 -0
  95. package/dist/types/model/nodes/UserInputNode.d.ts +2 -3
  96. package/dist/types/plugins/anthropic/anthropic.d.ts +94 -13
  97. package/dist/types/plugins/anthropic/nodes/ChatAnthropicNode.d.ts +7 -2
  98. package/dist/types/plugins/google/google.d.ts +93 -18
  99. package/dist/types/plugins/google/nodes/ChatGoogleNode.d.ts +3 -2
  100. package/dist/types/recording/RecordedEvents.d.ts +2 -0
  101. package/dist/types/utils/base64.d.ts +1 -1
  102. package/dist/types/utils/chatMessageToOpenAIChatCompletionMessage.d.ts +3 -1
  103. package/dist/types/utils/interpolation.d.ts +3 -0
  104. package/dist/types/utils/openai.d.ts +127 -21
  105. package/dist/types/utils/paths.d.ts +8 -0
  106. package/package.json +15 -11
  107. package/dist/types/model/nodes/{GptFunctionNode.d.ts → ToolNode.d.ts} +0 -0
package/dist/esm/plugins/google/google.js

@@ -1,4 +1,6 @@
- export const googleModels = {
+ import { FunctionCallingMode, } from '@google/generative-ai';
+ import { P, match } from 'ts-pattern';
+ export const googleModelsDeprecated = {
  'gemini-pro': {
  maxTokens: 32760,
  cost: {
@@ -16,16 +18,123 @@ export const googleModels = {
  displayName: 'Gemini Pro Vision',
  },
  };
- export const googleModelOptions = Object.entries(googleModels).map(([id, { displayName }]) => ({
+ export const generativeAiGoogleModels = {
+ 'gemini-2.0-flash-001': {
+ maxTokens: 1048576,
+ cost: {
+ prompt: 0.15 / 1000,
+ completion: 0.6 / 1000,
+ },
+ displayName: 'Gemini 2.0 Flash',
+ },
+ 'gemini-2.0-pro-exp-02-05': {
+ maxTokens: 2097152,
+ cost: {
+ prompt: 0, // Unknown
+ completion: 0, // Unknown
+ },
+ displayName: 'Gemini 2.0 Pro',
+ },
+ 'gemini-2.0-flash-lite-preview-02-05': {
+ maxTokens: 1048576,
+ cost: {
+ prompt: 0.075 / 1000,
+ completion: 0.3 / 1000,
+ },
+ displayName: 'Gemini 2.0 Flash Lite',
+ },
+ 'gemini-2.0-flash-thinking-exp-01-21': {
+ maxTokens: 1048576,
+ cost: {
+ prompt: 0, // Unknown
+ completion: 0, // Unknown
+ },
+ displayName: 'Gemini 2.0 Flash Thinking',
+ },
+ 'gemini-1.5-flash': {
+ maxTokens: 1048576,
+ cost: {
+ prompt: 0, // It's per-character wtf
+ completion: 0, // It's per-character
+ },
+ displayName: 'Gemini 1.5 Flash',
+ },
+ 'gemini-1.5-pro': {
+ maxTokens: 2097152,
+ cost: {
+ prompt: 0, // It's per-character wtf
+ completion: 0, // It's per-character
+ },
+ displayName: 'Gemini 1.5 Pro',
+ },
+ 'gemini-1.0-pro': {
+ maxTokens: 32760,
+ cost: {
+ prompt: 0, // It's per-character wtf
+ completion: 0, // It's per-character
+ },
+ displayName: 'Gemini 1.0 Pro',
+ },
+ 'gemini-1.0-pro-vision': {
+ maxTokens: 16384,
+ cost: {
+ prompt: 0, // It's per-character wtf
+ completion: 0, // It's per-character
+ },
+ displayName: 'Gemini 1.0 Pro Vision',
+ },
+ };
+ export const googleModelOptionsDeprecated = Object.entries(googleModelsDeprecated).map(([id, { displayName }]) => ({
  value: id,
  label: displayName,
  }));
+ export const generativeAiOptions = Object.entries(generativeAiGoogleModels).map(([id, { displayName }]) => ({
+ value: id,
+ label: displayName,
+ }));
+ export async function* streamGenerativeAi({ apiKey, model, systemPrompt, prompt, maxOutputTokens, temperature, topP, topK, signal, tools, }) {
+ const { GoogleGenerativeAI } = await import('@google/generative-ai');
+ const genAi = new GoogleGenerativeAI(apiKey);
+ const genaiModel = genAi.getGenerativeModel({
+ model,
+ systemInstruction: systemPrompt,
+ generationConfig: {
+ maxOutputTokens,
+ temperature,
+ topP,
+ topK,
+ },
+ tools,
+ });
+ const result = await genaiModel.generateContentStream({
+ contents: prompt,
+ }, { signal });
+ for await (const chunk of result.stream) {
+ const outChunk = {
+ completion: undefined,
+ finish_reason: undefined,
+ function_calls: undefined,
+ model,
+ };
+ const functionCalls = chunk.functionCalls();
+ if (functionCalls) {
+ outChunk.function_calls = functionCalls;
+ }
+ if (chunk.candidates) {
+ outChunk.completion = chunk.candidates[0]?.content?.parts[0]?.text;
+ outChunk.finish_reason = chunk.candidates[0]?.finishReason;
+ }
+ if (outChunk.completion || outChunk.function_calls) {
+ yield outChunk;
+ }
+ }
+ }
  export async function* streamChatCompletions({ project, location, applicationCredentials, model, signal, max_output_tokens, temperature, top_p, top_k, prompt, }) {
  const defaultSignal = new AbortController().signal;
  // If you import normally, the Google auth library throws a fit.
  const { VertexAI } = await import('@google-cloud/vertexai');
  // Can't find a way to pass the credentials path in
- process.env['GOOGLE_APPLICATION_CREDENTIALS'] = applicationCredentials;
+ process.env.GOOGLE_APPLICATION_CREDENTIALS = applicationCredentials;
  const vertexAi = new VertexAI({ project, location });
  const generativeModel = vertexAi.preview.getGenerativeModel({
  model,
@@ -37,11 +146,10 @@ export async function* streamChatCompletions({ project, location, applicationCre
  },
  });
  const response = await generativeModel.generateContentStream({
- contents: prompt,
+ contents: prompt, // crazy type stuff but... this is good enough, this is legacy
  });
  let hadChunks = false;
  for await (const chunk of response.stream) {
- console.log('streaming google responses');
  hadChunks = true;
  if (!signal?.aborted && chunk.candidates[0]?.content.parts[0]?.text) {
  yield {
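
For orientation, here is a minimal sketch of how the new streamGenerativeAi generator can be consumed. The chunk shape (completion, finish_reason, function_calls, model) comes straight from the hunk above; the surrounding harness (printCompletion, the literal prompt and parameters) is illustrative only, not part of the package.

    // Hypothetical consumer; chunk fields mirror outChunk in the hunk above.
    import { streamGenerativeAi } from './google.js';

    async function printCompletion(apiKey: string): Promise<void> {
      const controller = new AbortController();
      const parts: string[] = [];
      for await (const chunk of streamGenerativeAi({
        apiKey,
        model: 'gemini-2.0-flash-001',
        systemPrompt: 'You are terse.',
        prompt: [{ role: 'user', parts: [{ text: 'Say hello.' }] }],
        maxOutputTokens: 256,
        temperature: 0.5,
        topP: undefined,
        topK: undefined,
        signal: controller.signal,
        tools: [],
      })) {
        if (chunk.completion) parts.push(chunk.completion); // incremental text
        if (chunk.function_calls) console.log('tool calls:', chunk.function_calls);
      }
      console.log(parts.join(''));
    }
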
package/dist/esm/plugins/google/nodes/ChatGoogleNode.js

@@ -1,5 +1,5 @@
- import { uint8ArrayToBase64, } from '../../../index.js';
- import { googleModelOptions, googleModels, streamChatCompletions, } from '../google.js';
+ import {} from '../../../index.js';
+ import { streamChatCompletions, streamGenerativeAi, generativeAiGoogleModels, generativeAiOptions, } from '../google.js';
  import { nanoid } from 'nanoid/non-secure';
  import { dedent } from 'ts-dedent';
  import retry from 'p-retry';
@@ -7,8 +7,12 @@ import { match } from 'ts-pattern';
  import { coerceType, coerceTypeOptional } from '../../../utils/coerceType.js';
  import { addWarning } from '../../../utils/outputs.js';
  import { getError } from '../../../utils/errors.js';
+ import { uint8ArrayToBase64 } from '../../../utils/base64.js';
  import { pluginNodeDefinition } from '../../../model/NodeDefinition.js';
  import { getScalarTypeOf, isArrayDataValue } from '../../../model/DataValue.js';
+ import { getInputOrData } from '../../../utils/inputs.js';
+ import { GoogleGenerativeAIError, SchemaType, } from '@google/generative-ai';
+ import { mapValues } from 'lodash-es';
  // Temporary
  const cache = new Map();
  export const ChatGoogleNodeImpl = {
@@ -23,7 +27,7 @@ export const ChatGoogleNodeImpl = {
  width: 275,
  },
  data: {
- model: 'gemini-pro',
+ model: 'gemini-2.0-flash-001',
  useModelInput: false,
  temperature: 0.5,
  useTemperatureInput: false,
@@ -37,12 +41,20 @@
  useMaxTokensInput: false,
  cache: false,
  useAsGraphPartialOutput: true,
+ useToolCalling: false,
  },
  };
  return chartNode;
  },
  getInputDefinitions(data) {
  const inputs = [];
+ inputs.push({
+ id: 'systemPrompt',
+ title: 'System Prompt',
+ dataType: 'string',
+ required: false,
+ description: 'An optional system prompt for the model to use.',
+ });
  if (data.useModelInput) {
  inputs.push({
  id: 'model',
@@ -79,6 +91,14 @@
  title: 'Max Tokens',
  });
  }
+ if (data.useToolCalling) {
+ inputs.push({
+ dataType: 'gpt-function[]',
+ id: 'functions',
+ title: 'Tools',
+ description: 'Tools available for the model to call.',
+ });
+ }
  inputs.push({
  dataType: ['chat-message', 'chat-message[]'],
  id: 'prompt',
@@ -105,11 +125,19 @@
  title: 'All Messages',
  description: 'All messages, with the response appended.',
  });
+ if (data.useToolCalling) {
+ outputs.push({
+ dataType: 'object[]',
+ id: 'function-calls',
+ title: 'Tool Calls',
+ description: 'Tool calls made by the model.',
+ });
+ }
  return outputs;
  },
  getBody(data) {
  return dedent `
- ${googleModels[data.model]?.displayName ?? `Google (${data.model})`}
+ ${generativeAiGoogleModels[data.model]?.displayName ?? `Google (${data.model})`}
  ${data.useTopP
  ? `Top P: ${data.useTopPInput ? '(Using Input)' : data.top_p}`
  : `Temperature: ${data.useTemperatureInput ? '(Using Input)' : data.temperature}`}
@@ -123,7 +151,7 @@
  label: 'Model',
  dataKey: 'model',
  useInputToggleDataKey: 'useModelInput',
- options: googleModelOptions,
+ options: generativeAiOptions,
  },
  {
  type: 'number',
@@ -158,6 +186,11 @@
  max: Number.MAX_SAFE_INTEGER,
  step: 1,
  },
+ {
+ type: 'toggle',
+ label: 'Enable Tool Calling',
+ dataKey: 'useToolCalling',
+ },
  {
  type: 'toggle',
  label: 'Cache (same inputs, same outputs)',
@@ -182,29 +215,23 @@
  },
  async process(data, inputs, context) {
  const output = {};
- const rawModel = data.useModelInput
- ? coerceTypeOptional(inputs['model'], 'string') ?? data.model
- : data.model;
+ const systemPrompt = coerceTypeOptional(inputs['systemPrompt'], 'string');
+ const rawModel = getInputOrData(data, inputs, 'model');
  const model = rawModel;
- const temperature = data.useTemperatureInput
- ? coerceTypeOptional(inputs['temperature'], 'number') ?? data.temperature
- : data.temperature;
- const topP = data.useTopPInput ? coerceTypeOptional(inputs['top_p'], 'number') ?? data.top_p : data.top_p;
- const useTopP = data.useUseTopPInput
- ? coerceTypeOptional(inputs['useTopP'], 'boolean') ?? data.useTopP
- : data.useTopP;
+ const temperature = getInputOrData(data, inputs, 'temperature', 'number');
+ const topP = getInputOrData(data, inputs, 'top_p', 'number');
+ const useTopP = getInputOrData(data, inputs, 'useTopP', 'boolean');
  const { messages } = getChatGoogleNodeMessages(inputs);
- const prompt = await Promise.all(messages.map(async (message) => {
- return {
- role: message.type === 'user' ? 'user' : 'assistant',
- parts: await Promise.all([message.message].flat().map(async (part) => {
+ let prompt = await Promise.all(messages.map(async (message) => {
+ if (message.type === 'user' || message.type === 'assistant') {
+ const parts = await Promise.all([message.message].flat().map(async (part) => {
  if (typeof part === 'string') {
  return { text: part };
  }
  else if (part.type === 'image') {
  return {
- inline_data: {
- mime_type: part.mediaType,
+ inlineData: {
+ mimeType: part.mediaType,
  data: (await uint8ArrayToBase64(part.data)),
  },
  };
@@ -212,9 +239,58 @@
  else {
  throw new Error(`Google Vertex AI does not support message parts of type ${part.type}`);
  }
- })),
- };
+ }));
+ if (message.type === 'assistant' && (message.function_calls?.length ?? 0) > 0) {
+ if (parts[0].text === '') {
+ parts.shift(); // remove empty text part
+ }
+ for (const call of message.function_calls ?? []) {
+ parts.push({
+ functionCall: {
+ name: call.name,
+ args: JSON.parse(call.arguments),
+ },
+ });
+ }
+ }
+ return {
+ role: message.type,
+ parts,
+ };
+ }
+ if (message.type === 'function') {
+ return {
+ role: 'function',
+ parts: [
+ {
+ functionResponse: {
+ name: message.name,
+ response: {
+ result: typeof message.message === 'string' ? message.message : '',
+ },
+ },
+ },
+ ],
+ };
+ }
+ throw new Error(`Google Vertex AI does not support message type ${message.type}`);
  }));
+ // Collapse sequential function responses into a single function response with multiple parts
+ prompt = prompt.reduce((acc, message) => {
+ const lastMessage = acc.at(-1);
+ // Shouldn't be undefined but not sure if this is where the crash is happening...
+ if (lastMessage &&
+ message.role === 'function' &&
+ lastMessage.role === 'function' &&
+ lastMessage?.parts &&
+ message.parts) {
+ lastMessage.parts.push(...message.parts);
+ }
+ else {
+ acc.push(message);
+ }
+ return acc;
+ }, []);
  let { maxTokens } = data;
  const tokenizerInfo = {
  node: context.node,
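
To make the reducer above concrete: two consecutive function-role messages collapse into one message whose parts hold both functionResponse entries, the single-message shape the code comment describes. The values below are hypothetical.

    // Hypothetical before/after for the reduce step above.
    const before = [
      { role: 'user', parts: [{ text: 'Weather in two cities?' }] },
      { role: 'function', parts: [{ functionResponse: { name: 'get_weather', response: { result: 'Sunny' } } }] },
      { role: 'function', parts: [{ functionResponse: { name: 'get_weather', response: { result: 'Rainy' } } }] },
    ];

    // After collapsing, the second response's parts are folded into the first:
    const after = [
      { role: 'user', parts: [{ text: 'Weather in two cities?' }] },
      {
        role: 'function',
        parts: [
          { functionResponse: { name: 'get_weather', response: { result: 'Sunny' } } },
          { functionResponse: { name: 'get_weather', response: { result: 'Rainy' } } },
        ],
      },
    ];
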
@@ -223,25 +299,53 @@
  };
  // TODO Better token counting for Google models.
  const tokenCount = await context.tokenizer.getTokenCountForMessages(messages, undefined, tokenizerInfo);
- if (googleModels[model] && tokenCount >= googleModels[model].maxTokens) {
- throw new Error(`The model ${model} can only handle ${googleModels[model].maxTokens} tokens, but ${tokenCount} were provided in the prompts alone.`);
+ if (generativeAiGoogleModels[model] && tokenCount >= generativeAiGoogleModels[model].maxTokens) {
+ throw new Error(`The model ${model} can only handle ${generativeAiGoogleModels[model].maxTokens} tokens, but ${tokenCount} were provided in the prompts alone.`);
  }
- if (googleModels[model] && tokenCount + maxTokens > googleModels[model].maxTokens) {
- const message = `The model can only handle a maximum of ${googleModels[model].maxTokens} tokens, but the prompts and max tokens together exceed this limit. The max tokens has been reduced to ${googleModels[model].maxTokens - tokenCount}.`;
+ if (generativeAiGoogleModels[model] && tokenCount + maxTokens > generativeAiGoogleModels[model].maxTokens) {
+ const message = `The model can only handle a maximum of ${generativeAiGoogleModels[model].maxTokens} tokens, but the prompts and max tokens together exceed this limit. The max tokens has been reduced to ${generativeAiGoogleModels[model].maxTokens - tokenCount}.`;
  addWarning(output, message);
- maxTokens = Math.floor((googleModels[model].maxTokens - tokenCount) * 0.95); // reduce max tokens by 5% to be safe, calculation is a little wrong.
+ maxTokens = Math.floor((generativeAiGoogleModels[model].maxTokens - tokenCount) * 0.95); // reduce max tokens by 5% to be safe, calculation is a little wrong.
  }
  const project = context.getPluginConfig('googleProjectId');
  const location = context.getPluginConfig('googleRegion');
  const applicationCredentials = context.getPluginConfig('googleApplicationCredentials');
- if (project == null) {
- throw new Error('Google Project ID is not defined.');
- }
- if (location == null) {
- throw new Error('Google Region is not defined.');
+ const apiKey = context.getPluginConfig('googleApiKey');
+ let tools = [];
+ if (data.useToolCalling) {
+ const gptTools = coerceTypeOptional(inputs['functions'], 'gpt-function[]') ?? [];
+ if (gptTools) {
+ tools = [
+ {
+ functionDeclarations: gptTools.map((tool) => ({
+ name: tool.name,
+ description: tool.description,
+ parameters: Object.keys(tool.parameters.properties).length === 0
+ ? undefined
+ : {
+ type: SchemaType.OBJECT,
+ properties: mapValues(tool.parameters.properties, (p) => ({
+ // gemini doesn't support union property types, it uses openapi style not jsonschema, what a mess
+ type: Array.isArray(p.type) ? p.type.filter((t) => t !== 'null')[0] : p.type,
+ description: p.description,
+ })),
+ required: tool.parameters.required || [],
+ },
+ })),
+ },
+ ];
+ }
  }
- if (applicationCredentials == null) {
- throw new Error('Google Application Credentials is not defined.');
+ if (!apiKey) {
+ if (project == null) {
+ throw new Error('Google Project ID or Google API Key is not defined.');
+ }
+ if (location == null) {
+ throw new Error('Google Region or Google API Key is not defined.');
+ }
+ if (applicationCredentials == null) {
+ throw new Error('Google Application Credentials or Google API Key is not defined.');
+ }
  }
  try {
  return await retry(async () => {
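
The functionDeclarations mapping above converts Rivet's JSON-Schema-style gpt-function definitions into Gemini's OpenAPI-style schema, dropping union types by keeping the first non-null member. Roughly, for a hypothetical input:

    // Hypothetical gpt-function as Rivet represents it (JSON Schema style):
    const gptTool = {
      name: 'get_weather',
      description: 'Look up current weather.',
      parameters: {
        type: 'object',
        properties: {
          city: { type: 'string', description: 'City name' },
          units: { type: ['string', 'null'], description: 'metric or imperial' },
        },
        required: ['city'],
      },
    };

    // Approximate result of the mapping above (SchemaType.OBJECT === 'object'):
    const declaration = {
      name: 'get_weather',
      description: 'Look up current weather.',
      parameters: {
        type: 'object',
        properties: {
          city: { type: 'string', description: 'City name' },
          units: { type: 'string', description: 'metric or imperial' }, // 'null' filtered out of the union
        },
        required: ['city'],
      },
    };
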
@@ -249,8 +353,11 @@
  prompt,
  model,
  temperature: useTopP ? undefined : temperature,
- top_p: useTopP ? topP : undefined,
- max_output_tokens: maxTokens,
+ topP: useTopP ? topP : undefined,
+ maxOutputTokens: maxTokens,
+ systemPrompt,
+ topK: undefined,
+ tools,
  };
  const cacheKey = JSON.stringify(options);
  if (data.cache) {
@@ -260,24 +367,59 @@
  }
  }
  const startTime = Date.now();
- const chunks = streamChatCompletions({
- signal: context.signal,
- project,
- location,
- applicationCredentials,
- ...options,
- });
+ let chunks;
+ if (data.useToolCalling && !apiKey) {
+ throw new Error('Tool calling is only supported when using a generative API key.');
+ }
+ if (apiKey) {
+ chunks = streamGenerativeAi({
+ signal: context.signal,
+ model,
+ prompt,
+ maxOutputTokens: maxTokens,
+ temperature: useTopP ? undefined : temperature,
+ topP: useTopP ? topP : undefined,
+ topK: undefined,
+ apiKey,
+ systemPrompt,
+ tools,
+ });
+ }
+ else {
+ chunks = streamChatCompletions({
+ signal: context.signal,
+ model: model,
+ prompt,
+ max_output_tokens: maxTokens,
+ temperature: useTopP ? undefined : temperature,
+ top_p: useTopP ? topP : undefined,
+ top_k: undefined,
+ project: project,
+ location: location,
+ applicationCredentials: applicationCredentials,
+ });
+ }
  const responseParts = [];
+ const functionCalls = [];
  for await (const chunk of chunks) {
- if (!chunk.completion) {
- // Could be error for some reason 🤷‍♂️ but ignoring has worked for me so far.
- continue;
+ if (chunk.completion) {
+ responseParts.push(chunk.completion);
+ output['response'] = {
+ type: 'string',
+ value: responseParts.join('').trim(),
+ };
+ }
+ if (chunk.function_calls) {
+ functionCalls.push(...chunk.function_calls);
+ output['function-calls'] = {
+ type: 'object[]',
+ value: functionCalls.map((fc) => ({
+ id: fc.name,
+ name: fc.name,
+ arguments: fc.args,
+ })),
+ };
  }
- responseParts.push(chunk.completion);
- output['response'] = {
- type: 'string',
- value: responseParts.join('').trim(),
- };
  context.onPartialOutputs?.(output);
  }
  const endTime = Date.now();
@@ -289,7 +431,13 @@
  type: 'assistant',
  message: responseParts.join('').trim() ?? '',
  function_call: undefined,
- function_calls: undefined,
+ function_calls: functionCalls.length === 0
+ ? undefined
+ : functionCalls.map((fc) => ({
+ id: fc.name,
+ name: fc.name,
+ arguments: JSON.stringify(fc.args),
+ })),
  },
  ],
  };
@@ -297,7 +445,7 @@
  type: 'chat-message[]',
  value: messages,
  };
- if (responseParts.length === 0) {
+ if (responseParts.length === 0 && functionCalls.length === 0) {
  throw new Error('No response from Google');
  }
  output['requestTokens'] = { type: 'number', value: tokenCount };
@@ -322,6 +470,15 @@
  signal: context.signal,
  onFailedAttempt(err) {
  context.trace(`ChatGoogleNode failed, retrying: ${err.toString()}`);
+ const googleError = err;
+ if (googleError.status && googleError.status >= 400 && googleError.status < 500) {
+ if (googleError.status === 429) {
+ context.trace('Google API rate limit exceeded, retrying...');
+ }
+ else {
+ throw new Error(`Google API error: ${googleError.status} ${googleError.message}`);
+ }
+ }
  if (context.signal.aborted) {
  throw new Error('Aborted');
  }
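
The added onFailedAttempt logic gives the retry loop a fail-fast path: 4xx statuses other than 429 rethrow immediately (retrying a bad request cannot succeed), while 429s and transient failures fall through to p-retry's backoff. A condensed sketch of the same policy, assuming the error object carries an HTTP status as the cast above implies; the names are illustrative:

    import retry from 'p-retry';

    // Condensed restatement of the retry policy in the hunk above.
    async function callWithPolicy<T>(fn: () => Promise<T>, signal: AbortSignal): Promise<T> {
      return retry(fn, {
        signal,
        onFailedAttempt(err) {
          const status = (err as { status?: number }).status;
          if (status !== undefined && status >= 400 && status < 500 && status !== 429) {
            // Non-rate-limit client errors won't succeed on retry; rethrow to stop the loop.
            throw new Error(`Google API error: ${status} ${err.message}`);
          }
          if (signal.aborted) {
            throw new Error('Aborted');
          }
          // 429 and server/transport errors fall through and are retried with backoff.
        },
      });
    }
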
package/dist/esm/plugins/google/plugin.js

@@ -7,26 +7,33 @@ export const googlePlugin = {
  register(chatGoogleNode);
  },
  configSpec: {
+ googleApiKey: {
+ type: 'secret',
+ label: 'Google API Key',
+ description: 'The API key for accessing Google generative AI.',
+ pullEnvironmentVariable: 'GOOGLE_GENERATIVE_AI_API_KEY',
+ helperText: 'You may also set the GOOGLE_GENERATIVE_AI_API_KEY environment variable.',
+ },
  googleProjectId: {
  type: 'string',
- label: 'Google Project ID',
+ label: 'Google Project ID (Deprecated)',
  description: 'The Google project ID.',
  pullEnvironmentVariable: 'GCP_PROJECT',
- helperText: 'You may also set the GCP_PROJECT environment variable.',
+ helperText: 'Deprecated, use Google API Key instead. You may also set the GCP_PROJECT environment variable.',
  },
  googleRegion: {
  type: 'string',
- label: 'Google Region',
+ label: 'Google Region (Deprecated)',
  description: 'The Google region.',
  pullEnvironmentVariable: 'GCP_REGION',
- helperText: 'You may also set the GCP_REGION environment variable.',
+ helperText: 'Deprecated, use Google API Key instead. You may also set the GCP_REGION environment variable.',
  },
  googleApplicationCredentials: {
  type: 'string',
- label: 'Google Application Credentials',
+ label: 'Google Application Credentials (Deprecated)',
  description: 'The path with the JSON file that contains your credentials.',
  pullEnvironmentVariable: 'GOOGLE_APPLICATION_CREDENTIALS',
- helperText: 'You may also set the GOOGLE_APPLICATION_CREDENTIALS environment variable. See https://cloud.google.com/vertex-ai/docs/start/client-libraries for more info.',
+ helperText: 'Deprecated, use Google API Key instead. You may also set the GOOGLE_APPLICATION_CREDENTIALS environment variable. See https://cloud.google.com/vertex-ai/docs/start/client-libraries for more info.',
  },
  },
  };
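
Taken together with the process() changes earlier, the configSpec now encodes a precedence: when googleApiKey is set (directly or via GOOGLE_GENERATIVE_AI_API_KEY), the Generative Language API is used and the three Vertex AI settings are not required; only when the key is absent do the deprecated settings get validated. A small sketch of that resolution logic; resolveGoogleAuth and its shape are illustrative, not package API:

    // Sketch of the credential precedence implied by process() above.
    type GoogleAuth =
      | { mode: 'generative-ai'; apiKey: string }
      | { mode: 'vertex-ai'; project: string; location: string; credentials: string };

    function resolveGoogleAuth(get: (key: string) => string | undefined): GoogleAuth {
      const apiKey = get('googleApiKey');
      if (apiKey) {
        return { mode: 'generative-ai', apiKey }; // preferred path; tool calling requires this
      }
      const project = get('googleProjectId');
      const location = get('googleRegion');
      const credentials = get('googleApplicationCredentials');
      if (!project || !location || !credentials) {
        throw new Error('Set a Google API Key, or all three deprecated Vertex AI settings.');
      }
      return { mode: 'vertex-ai', project, location, credentials };
    }
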
package/dist/esm/plugins/openai/nodes/RunThreadNode.js

@@ -1,10 +1,10 @@
- import { isArrayDataValue, arrayizeDataValue, unwrapDataValue, } from '../../../index.js';
+ import {} from '../../../index.js';
  import { openAiModelOptions, } from '../../../utils/openai.js';
  import { dedent, newId, coerceTypeOptional, getInputOrData } from '../../../utils/index.js';
+ import { arrayizeDataValue, unwrapDataValue } from '../../../model/DataValue.js';
  import { pluginNodeDefinition } from '../../../model/NodeDefinition.js';
  import { handleOpenAIError } from '../handleOpenaiError.js';
  import {} from '../../../model/DataValue.js';
- import { match } from 'ts-pattern';
  const POLL_FREQUENCY = 500;
  export const RunThreadNodeImpl = {
  create() {
package/dist/esm/recording/ExecutionRecorder.js

@@ -36,11 +36,13 @@ const toRecordedEventMap = {
  outputs,
  reason,
  }),
- userInput: ({ node, inputs, callback, processId }) => ({
+ userInput: ({ node, inputs, callback, processId, inputStrings, renderingType }) => ({
  nodeId: node.id,
  inputs,
  callback,
  processId,
+ inputStrings,
+ renderingType,
  }),
  partialOutput: ({ node, outputs, index, processId }) => ({
  nodeId: node.id,
@@ -119,6 +121,7 @@ export class ExecutionRecorder {
  }
  this.#events.push(toRecordedEvent(message, data));
  if (message === 'done' || message === 'abort' || message === 'error') {
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises
  this.#emitter.emit('finish', {
  recording: this.getRecording(),
  });
@@ -140,6 +143,7 @@
  }
  this.#events.push(toRecordedEvent(event, data));
  if (event === 'done' || event === 'abort' || event === 'error') {
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises
  this.#emitter.emit('finish', {
  recording: this.getRecording(),
  });
package/dist/esm/utils/chatMessageToOpenAIChatCompletionMessage.js

@@ -1,6 +1,6 @@
  import { match } from 'ts-pattern';
  import { uint8ArrayToBase64 } from './index.js';
- export async function chatMessageToOpenAIChatCompletionMessage(message) {
+ export async function chatMessageToOpenAIChatCompletionMessage(message, options) {
  const onlyStringContent = (message) => {
  const parts = Array.isArray(message.message) ? message.message : [message.message];
  const stringContent = parts
@@ -14,7 +14,20 @@ export async function chatMessageToOpenAIChatCompletionMessage(message) {
  return stringContent;
  };
  return match(message)
- .with({ type: 'system' }, (m) => ({ role: m.type, content: onlyStringContent(m) }))
+ .with({ type: 'system' }, (m) => {
+ if (options.isReasoningModel) {
+ return {
+ role: 'developer',
+ content: onlyStringContent(m),
+ };
+ }
+ else {
+ return {
+ role: m.type,
+ content: onlyStringContent(m),
+ };
+ }
+ })
  .with({ type: 'user' }, async (m) => {
  const parts = Array.isArray(m.message) ? m.message : [m.message];
  if (parts.length === 1 && typeof parts[0] === 'string') {
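
The new options parameter changes how system messages serialize: OpenAI's reasoning (o-series) models expect the 'developer' role instead of 'system', so callers now indicate which family they target. A usage sketch; the message literal is hypothetical:

    import { chatMessageToOpenAIChatCompletionMessage } from './chatMessageToOpenAIChatCompletionMessage.js';

    const system = { type: 'system' as const, message: 'Answer in one sentence.' };

    // Reasoning models: the system prompt is sent under the 'developer' role.
    await chatMessageToOpenAIChatCompletionMessage(system, { isReasoningModel: true });
    // => { role: 'developer', content: 'Answer in one sentence.' }

    // All other models keep the classic 'system' role.
    await chatMessageToOpenAIChatCompletionMessage(system, { isReasoningModel: false });
    // => { role: 'system', content: 'Answer in one sentence.' }
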
package/dist/esm/utils/coerceType.js

@@ -268,7 +268,7 @@ function coerceToBinary(value) {
  if (value.type === 'number') {
  return new Uint8Array([value.value]);
  }
- if (value.type === 'audio' || value.type === 'image') {
+ if (value.type === 'audio' || value.type === 'image' || value.type === 'document') {
  return value.value.data;
  }
  return new TextEncoder().encode(JSON.stringify(value.value));
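
With the added branch, document values now coerce to their raw bytes the same way audio and image values do, rather than being JSON-stringified. For example, assuming the DataValue shape the return statement above implies (payload in value.data); the literal is hypothetical:

    // A document DataValue carries its payload as a Uint8Array in value.data.
    const doc = {
      type: 'document' as const,
      value: { data: new Uint8Array([0x25, 0x50, 0x44, 0x46]), mediaType: 'application/pdf' }, // '%PDF'
    };

    // New behavior: coerceToBinary(doc) returns doc.value.data directly.
    // Old behavior: it fell through to new TextEncoder().encode(JSON.stringify(doc.value)).
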