@alpic80/rivet-core 1.19.1-aidon.3 → 1.24.0-aidon.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129) hide show
  1. package/README.md +4 -0
  2. package/dist/cjs/bundle.cjs +4512 -1240
  3. package/dist/cjs/bundle.cjs.map +4 -4
  4. package/dist/esm/api/createProcessor.js +8 -17
  5. package/dist/esm/api/looseDataValue.js +16 -0
  6. package/dist/esm/exports.js +2 -0
  7. package/dist/esm/integrations/CodeRunner.js +36 -0
  8. package/dist/esm/integrations/DatasetProvider.js +1 -1
  9. package/dist/esm/integrations/GptTokenizerTokenizer.js +7 -4
  10. package/dist/esm/integrations/openai/OpenAIEmbeddingGenerator.js +1 -1
  11. package/dist/esm/model/DataValue.js +14 -2
  12. package/dist/esm/model/GraphProcessor.js +276 -107
  13. package/dist/esm/model/NodeBase.js +11 -1
  14. package/dist/esm/model/NodeImpl.js +8 -0
  15. package/dist/esm/model/Nodes.js +31 -4
  16. package/dist/esm/model/ProjectReferenceLoader.js +1 -0
  17. package/dist/esm/model/nodes/AssembleMessageNode.js +12 -2
  18. package/dist/esm/model/nodes/AssemblePromptNode.js +22 -0
  19. package/dist/esm/model/nodes/CallGraphNode.js +3 -4
  20. package/dist/esm/model/nodes/ChatLoopNode.js +150 -0
  21. package/dist/esm/model/nodes/ChatNode.js +7 -934
  22. package/dist/esm/model/nodes/ChatNodeBase.js +1277 -0
  23. package/dist/esm/model/nodes/ChunkNode.js +2 -2
  24. package/dist/esm/model/nodes/CodeNode.js +40 -5
  25. package/dist/esm/model/nodes/CronNode.js +248 -0
  26. package/dist/esm/model/nodes/DelegateFunctionCallNode.js +37 -12
  27. package/dist/esm/model/nodes/DestructureNode.js +1 -1
  28. package/dist/esm/model/nodes/DocumentNode.js +183 -0
  29. package/dist/esm/model/nodes/ExtractJsonNode.js +4 -4
  30. package/dist/esm/model/nodes/ExtractRegexNode.js +10 -11
  31. package/dist/esm/model/nodes/GetAllDatasetsNode.js +1 -1
  32. package/dist/esm/model/nodes/GetEmbeddingNode.js +1 -1
  33. package/dist/esm/model/nodes/HttpCallNode.js +3 -1
  34. package/dist/esm/model/nodes/IfNode.js +5 -0
  35. package/dist/esm/model/nodes/LoopControllerNode.js +1 -1
  36. package/dist/esm/model/nodes/LoopUntilNode.js +214 -0
  37. package/dist/esm/model/nodes/ObjectNode.js +1 -1
  38. package/dist/esm/model/nodes/PromptNode.js +29 -6
  39. package/dist/esm/model/nodes/RaceInputsNode.js +1 -2
  40. package/dist/esm/model/nodes/ReadAllFilesNode.js +210 -0
  41. package/dist/esm/model/nodes/ReadDirectoryNode.js +31 -25
  42. package/dist/esm/model/nodes/ReferencedGraphAliasNode.js +199 -0
  43. package/dist/esm/model/nodes/ReplaceDatasetNode.js +1 -1
  44. package/dist/esm/model/nodes/SliceNode.js +0 -1
  45. package/dist/esm/model/nodes/SplitNode.js +1 -1
  46. package/dist/esm/model/nodes/SubGraphNode.js +0 -1
  47. package/dist/esm/model/nodes/TextNode.js +9 -4
  48. package/dist/esm/model/nodes/ToMarkdownTableNode.js +119 -0
  49. package/dist/esm/model/nodes/ToTreeNode.js +133 -0
  50. package/dist/esm/model/nodes/{GptFunctionNode.js → ToolNode.js} +10 -10
  51. package/dist/esm/model/nodes/UserInputNode.js +10 -12
  52. package/dist/esm/model/nodes/WriteFileNode.js +147 -0
  53. package/dist/esm/native/BrowserNativeApi.js +16 -1
  54. package/dist/esm/plugins/aidon/nodes/ChatAidonNode.js +5 -5
  55. package/dist/esm/plugins/anthropic/anthropic.js +29 -14
  56. package/dist/esm/plugins/anthropic/fetchEventSource.js +3 -2
  57. package/dist/esm/plugins/anthropic/nodes/ChatAnthropicNode.js +264 -147
  58. package/dist/esm/plugins/anthropic/plugin.js +9 -1
  59. package/dist/esm/plugins/assemblyAi/LemurQaNode.js +1 -1
  60. package/dist/esm/plugins/assemblyAi/LemurSummaryNode.js +1 -1
  61. package/dist/esm/plugins/gentrace/plugin.js +6 -6
  62. package/dist/esm/plugins/google/google.js +120 -6
  63. package/dist/esm/plugins/google/nodes/ChatGoogleNode.js +219 -56
  64. package/dist/esm/plugins/google/plugin.js +13 -6
  65. package/dist/esm/plugins/openai/nodes/RunThreadNode.js +2 -2
  66. package/dist/esm/plugins/openai/nodes/ThreadMessageNode.js +1 -1
  67. package/dist/esm/recording/ExecutionRecorder.js +59 -4
  68. package/dist/esm/utils/base64.js +13 -0
  69. package/dist/esm/utils/chatMessageToOpenAIChatCompletionMessage.js +15 -2
  70. package/dist/esm/utils/coerceType.js +4 -1
  71. package/dist/esm/utils/fetchEventSource.js +1 -1
  72. package/dist/esm/utils/interpolation.js +108 -3
  73. package/dist/esm/utils/openai.js +106 -50
  74. package/dist/esm/utils/paths.js +80 -0
  75. package/dist/esm/utils/serialization/serialization_v4.js +5 -0
  76. package/dist/types/api/createProcessor.d.ts +11 -5
  77. package/dist/types/api/looseDataValue.d.ts +4 -0
  78. package/dist/types/api/streaming.d.ts +1 -1
  79. package/dist/types/exports.d.ts +2 -0
  80. package/dist/types/integrations/CodeRunner.d.ts +18 -0
  81. package/dist/types/integrations/DatasetProvider.d.ts +1 -1
  82. package/dist/types/model/DataValue.d.ts +29 -6
  83. package/dist/types/model/EditorDefinition.d.ts +6 -1
  84. package/dist/types/model/GraphProcessor.d.ts +14 -7
  85. package/dist/types/model/NodeBase.d.ts +4 -0
  86. package/dist/types/model/NodeImpl.d.ts +5 -4
  87. package/dist/types/model/Nodes.d.ts +13 -4
  88. package/dist/types/model/ProcessContext.d.ts +16 -1
  89. package/dist/types/model/Project.d.ts +19 -7
  90. package/dist/types/model/ProjectReferenceLoader.d.ts +5 -0
  91. package/dist/types/model/RivetPlugin.d.ts +6 -0
  92. package/dist/types/model/RivetUIContext.d.ts +5 -1
  93. package/dist/types/model/Settings.d.ts +1 -0
  94. package/dist/types/model/nodes/AssemblePromptNode.d.ts +4 -1
  95. package/dist/types/model/nodes/ChatLoopNode.d.ts +21 -0
  96. package/dist/types/model/nodes/ChatNode.d.ts +2 -62
  97. package/dist/types/model/nodes/ChatNodeBase.d.ts +85 -0
  98. package/dist/types/model/nodes/CodeNode.d.ts +8 -2
  99. package/dist/types/model/nodes/CronNode.d.ts +34 -0
  100. package/dist/types/model/nodes/DelegateFunctionCallNode.d.ts +1 -0
  101. package/dist/types/model/nodes/DocumentNode.d.ts +28 -0
  102. package/dist/types/model/nodes/GetAllDatasetsNode.d.ts +2 -2
  103. package/dist/types/model/nodes/LoopUntilNode.d.ts +32 -0
  104. package/dist/types/model/nodes/ObjectNode.d.ts +2 -2
  105. package/dist/types/model/nodes/PromptNode.d.ts +2 -0
  106. package/dist/types/model/nodes/RaceInputsNode.d.ts +1 -2
  107. package/dist/types/model/nodes/ReadAllFilesNode.d.ts +30 -0
  108. package/dist/types/model/nodes/ReadDirectoryNode.d.ts +1 -1
  109. package/dist/types/model/nodes/ReferencedGraphAliasNode.d.ts +31 -0
  110. package/dist/types/model/nodes/SplitNode.d.ts +2 -2
  111. package/dist/types/model/nodes/ToMarkdownTableNode.d.ts +19 -0
  112. package/dist/types/model/nodes/ToTreeNode.d.ts +21 -0
  113. package/dist/types/model/nodes/UserInputNode.d.ts +2 -3
  114. package/dist/types/model/nodes/WriteFileNode.d.ts +23 -0
  115. package/dist/types/native/BrowserNativeApi.d.ts +8 -5
  116. package/dist/types/native/NativeApi.d.ts +12 -1
  117. package/dist/types/plugins/anthropic/anthropic.d.ts +94 -13
  118. package/dist/types/plugins/anthropic/nodes/ChatAnthropicNode.d.ts +7 -2
  119. package/dist/types/plugins/google/google.d.ts +101 -18
  120. package/dist/types/plugins/google/nodes/ChatGoogleNode.d.ts +3 -2
  121. package/dist/types/recording/RecordedEvents.d.ts +3 -0
  122. package/dist/types/utils/base64.d.ts +2 -1
  123. package/dist/types/utils/chatMessageToOpenAIChatCompletionMessage.d.ts +3 -1
  124. package/dist/types/utils/interpolation.d.ts +3 -0
  125. package/dist/types/utils/openai.d.ts +127 -21
  126. package/dist/types/utils/paths.d.ts +8 -0
  127. package/dist/types/utils/serialization/serialization_v3.d.ts +1 -0
  128. package/package.json +15 -11
  129. /package/dist/types/model/nodes/{GptFunctionNode.d.ts → ToolNode.d.ts} +0 -0
@@ -1,4 +1,5 @@
1
- export const googleModels = {
1
+ import {} from '@google/generative-ai';
2
+ export const googleModelsDeprecated = {
2
3
  'gemini-pro': {
3
4
  maxTokens: 32760,
4
5
  cost: {
@@ -16,16 +17,130 @@ export const googleModels = {
16
17
  displayName: 'Gemini Pro Vision',
17
18
  },
18
19
  };
19
- export const googleModelOptions = Object.entries(googleModels).map(([id, { displayName }]) => ({
20
+ export const generativeAiGoogleModels = {
21
+ 'gemini-2.0-flash-001': {
22
+ maxTokens: 1048576,
23
+ cost: {
24
+ prompt: 0.15 / 1000,
25
+ completion: 0.6 / 1000,
26
+ },
27
+ displayName: 'Gemini 2.0 Flash',
28
+ },
29
+ 'gemini-2.0-pro-exp-02-05': {
30
+ maxTokens: 2097152,
31
+ cost: {
32
+ prompt: 0, // Unknown
33
+ completion: 0, // Unknown
34
+ },
35
+ displayName: 'Gemini 2.0 Pro',
36
+ },
37
+ 'gemini-2.5-pro-exp-03-25': {
38
+ maxTokens: 1000000,
39
+ cost: {
40
+ prompt: 0, // Unknown
41
+ completion: 0, // Unknown
42
+ },
43
+ displayName: 'Gemini 2.5 Pro Experimental',
44
+ },
45
+ 'gemini-2.0-flash-lite-preview-02-05': {
46
+ maxTokens: 1048576,
47
+ cost: {
48
+ prompt: 0.075 / 1000,
49
+ completion: 0.3 / 1000,
50
+ },
51
+ displayName: 'Gemini 2.0 Flash Lite',
52
+ },
53
+ 'gemini-2.0-flash-thinking-exp-01-21': {
54
+ maxTokens: 1048576,
55
+ cost: {
56
+ prompt: 0, // Unknown
57
+ completion: 0, // Unknown
58
+ },
59
+ displayName: 'Gemini 2.0 Flash Thinking',
60
+ },
61
+ 'gemini-1.5-flash': {
62
+ maxTokens: 1048576,
63
+ cost: {
64
+ prompt: 0, // It's per-character wtf
65
+ completion: 0, // It's per-character
66
+ },
67
+ displayName: 'Gemini 1.5 Flash',
68
+ },
69
+ 'gemini-1.5-pro': {
70
+ maxTokens: 2097152,
71
+ cost: {
72
+ prompt: 0, // It's per-character wtf
73
+ completion: 0, // It's per-character
74
+ },
75
+ displayName: 'Gemini 1.5 Pro',
76
+ },
77
+ 'gemini-1.0-pro': {
78
+ maxTokens: 32760,
79
+ cost: {
80
+ prompt: 0, // It's per-character wtf
81
+ completion: 0, // It's per-character
82
+ },
83
+ displayName: 'Gemini 1.0 Pro',
84
+ },
85
+ 'gemini-1.0-pro-vision': {
86
+ maxTokens: 16384,
87
+ cost: {
88
+ prompt: 0, // It's per-character wtf
89
+ completion: 0, // It's per-character
90
+ },
91
+ displayName: 'Gemini 1.0 Pro Vision',
92
+ },
93
+ };
94
+ export const googleModelOptionsDeprecated = Object.entries(googleModelsDeprecated).map(([id, { displayName }]) => ({
95
+ value: id,
96
+ label: displayName,
97
+ }));
98
+ export const generativeAiOptions = Object.entries(generativeAiGoogleModels).map(([id, { displayName }]) => ({
20
99
  value: id,
21
100
  label: displayName,
22
101
  }));
102
+ export async function* streamGenerativeAi({ apiKey, model, systemPrompt, prompt, maxOutputTokens, temperature, topP, topK, signal, tools, }) {
103
+ const { GoogleGenerativeAI } = await import('@google/generative-ai');
104
+ const genAi = new GoogleGenerativeAI(apiKey);
105
+ const genaiModel = genAi.getGenerativeModel({
106
+ model,
107
+ systemInstruction: systemPrompt,
108
+ generationConfig: {
109
+ maxOutputTokens,
110
+ temperature,
111
+ topP,
112
+ topK,
113
+ },
114
+ tools,
115
+ });
116
+ const result = await genaiModel.generateContentStream({
117
+ contents: prompt,
118
+ }, { signal });
119
+ for await (const chunk of result.stream) {
120
+ const outChunk = {
121
+ completion: undefined,
122
+ finish_reason: undefined,
123
+ function_calls: undefined,
124
+ model,
125
+ };
126
+ const functionCalls = chunk.functionCalls();
127
+ if (functionCalls) {
128
+ outChunk.function_calls = functionCalls;
129
+ }
130
+ if (chunk.candidates) {
131
+ outChunk.completion = chunk.candidates[0]?.content?.parts?.[0]?.text;
132
+ outChunk.finish_reason = chunk.candidates[0]?.finishReason;
133
+ }
134
+ if (outChunk.completion || outChunk.function_calls) {
135
+ yield outChunk;
136
+ }
137
+ }
138
+ }
23
139
  export async function* streamChatCompletions({ project, location, applicationCredentials, model, signal, max_output_tokens, temperature, top_p, top_k, prompt, }) {
24
- const defaultSignal = new AbortController().signal;
25
140
  // If you import normally, the Google auth library throws a fit.
26
141
  const { VertexAI } = await import('@google-cloud/vertexai');
27
142
  // Can't find a way to pass the credentials path in
28
- process.env['GOOGLE_APPLICATION_CREDENTIALS'] = applicationCredentials;
143
+ process.env.GOOGLE_APPLICATION_CREDENTIALS = applicationCredentials;
29
144
  const vertexAi = new VertexAI({ project, location });
30
145
  const generativeModel = vertexAi.preview.getGenerativeModel({
31
146
  model,
@@ -37,11 +152,10 @@ export async function* streamChatCompletions({ project, location, applicationCre
37
152
  },
38
153
  });
39
154
  const response = await generativeModel.generateContentStream({
40
- contents: prompt,
155
+ contents: prompt, // crazy type stuff but... this is good enough, this is legacy
41
156
  });
42
157
  let hadChunks = false;
43
158
  for await (const chunk of response.stream) {
44
- console.log('streaming google responses');
45
159
  hadChunks = true;
46
160
  if (!signal?.aborted && chunk.candidates[0]?.content.parts[0]?.text) {
47
161
  yield {
@@ -1,5 +1,5 @@
1
- import { uint8ArrayToBase64, } from '../../../index.js';
2
- import { googleModelOptions, googleModels, streamChatCompletions, } from '../google.js';
1
+ import {} from '../../../index.js';
2
+ import { streamChatCompletions, streamGenerativeAi, generativeAiGoogleModels, generativeAiOptions, } from '../google.js';
3
3
  import { nanoid } from 'nanoid/non-secure';
4
4
  import { dedent } from 'ts-dedent';
5
5
  import retry from 'p-retry';
@@ -7,8 +7,12 @@ import { match } from 'ts-pattern';
7
7
  import { coerceType, coerceTypeOptional } from '../../../utils/coerceType.js';
8
8
  import { addWarning } from '../../../utils/outputs.js';
9
9
  import { getError } from '../../../utils/errors.js';
10
+ import { uint8ArrayToBase64 } from '../../../utils/base64.js';
10
11
  import { pluginNodeDefinition } from '../../../model/NodeDefinition.js';
11
12
  import { getScalarTypeOf, isArrayDataValue } from '../../../model/DataValue.js';
13
+ import { getInputOrData } from '../../../utils/inputs.js';
14
+ import { SchemaType, } from '@google/generative-ai';
15
+ import { mapValues } from 'lodash-es';
12
16
  // Temporary
13
17
  const cache = new Map();
14
18
  export const ChatGoogleNodeImpl = {
@@ -23,7 +27,7 @@ export const ChatGoogleNodeImpl = {
23
27
  width: 275,
24
28
  },
25
29
  data: {
26
- model: 'gemini-pro',
30
+ model: 'gemini-2.0-flash-001',
27
31
  useModelInput: false,
28
32
  temperature: 0.5,
29
33
  useTemperatureInput: false,
@@ -37,12 +41,20 @@ export const ChatGoogleNodeImpl = {
37
41
  useMaxTokensInput: false,
38
42
  cache: false,
39
43
  useAsGraphPartialOutput: true,
44
+ useToolCalling: false,
40
45
  },
41
46
  };
42
47
  return chartNode;
43
48
  },
44
49
  getInputDefinitions(data) {
45
50
  const inputs = [];
51
+ inputs.push({
52
+ id: 'systemPrompt',
53
+ title: 'System Prompt',
54
+ dataType: 'string',
55
+ required: false,
56
+ description: 'An optional system prompt for the model to use.',
57
+ });
46
58
  if (data.useModelInput) {
47
59
  inputs.push({
48
60
  id: 'model',
@@ -79,6 +91,14 @@ export const ChatGoogleNodeImpl = {
79
91
  title: 'Max Tokens',
80
92
  });
81
93
  }
94
+ if (data.useToolCalling) {
95
+ inputs.push({
96
+ dataType: 'gpt-function[]',
97
+ id: 'functions',
98
+ title: 'Tools',
99
+ description: 'Tools available for the model to call.',
100
+ });
101
+ }
82
102
  inputs.push({
83
103
  dataType: ['chat-message', 'chat-message[]'],
84
104
  id: 'prompt',
@@ -105,11 +125,19 @@ export const ChatGoogleNodeImpl = {
105
125
  title: 'All Messages',
106
126
  description: 'All messages, with the response appended.',
107
127
  });
128
+ if (data.useToolCalling) {
129
+ outputs.push({
130
+ dataType: 'object[]',
131
+ id: 'function-calls',
132
+ title: 'Tool Calls',
133
+ description: 'Tool calls made by the model.',
134
+ });
135
+ }
108
136
  return outputs;
109
137
  },
110
138
  getBody(data) {
111
139
  return dedent `
112
- ${googleModels[data.model]?.displayName ?? `Google (${data.model})`}
140
+ ${generativeAiGoogleModels[data.model]?.displayName ?? `Google (${data.model})`}
113
141
  ${data.useTopP
114
142
  ? `Top P: ${data.useTopPInput ? '(Using Input)' : data.top_p}`
115
143
  : `Temperature: ${data.useTemperatureInput ? '(Using Input)' : data.temperature}`}
@@ -123,7 +151,7 @@ export const ChatGoogleNodeImpl = {
123
151
  label: 'Model',
124
152
  dataKey: 'model',
125
153
  useInputToggleDataKey: 'useModelInput',
126
- options: googleModelOptions,
154
+ options: generativeAiOptions,
127
155
  },
128
156
  {
129
157
  type: 'number',
@@ -158,6 +186,11 @@ export const ChatGoogleNodeImpl = {
158
186
  max: Number.MAX_SAFE_INTEGER,
159
187
  step: 1,
160
188
  },
189
+ {
190
+ type: 'toggle',
191
+ label: 'Enable Tool Calling',
192
+ dataKey: 'useToolCalling',
193
+ },
161
194
  {
162
195
  type: 'toggle',
163
196
  label: 'Cache (same inputs, same outputs)',
@@ -182,29 +215,23 @@ export const ChatGoogleNodeImpl = {
182
215
  },
183
216
  async process(data, inputs, context) {
184
217
  const output = {};
185
- const rawModel = data.useModelInput
186
- ? coerceTypeOptional(inputs['model'], 'string') ?? data.model
187
- : data.model;
218
+ const systemPrompt = coerceTypeOptional(inputs['systemPrompt'], 'string');
219
+ const rawModel = getInputOrData(data, inputs, 'model');
188
220
  const model = rawModel;
189
- const temperature = data.useTemperatureInput
190
- ? coerceTypeOptional(inputs['temperature'], 'number') ?? data.temperature
191
- : data.temperature;
192
- const topP = data.useTopPInput ? coerceTypeOptional(inputs['top_p'], 'number') ?? data.top_p : data.top_p;
193
- const useTopP = data.useUseTopPInput
194
- ? coerceTypeOptional(inputs['useTopP'], 'boolean') ?? data.useTopP
195
- : data.useTopP;
221
+ const temperature = getInputOrData(data, inputs, 'temperature', 'number');
222
+ const topP = getInputOrData(data, inputs, 'top_p', 'number');
223
+ const useTopP = getInputOrData(data, inputs, 'useTopP', 'boolean');
196
224
  const { messages } = getChatGoogleNodeMessages(inputs);
197
- const prompt = await Promise.all(messages.map(async (message) => {
198
- return {
199
- role: message.type === 'user' ? 'user' : 'assistant',
200
- parts: await Promise.all([message.message].flat().map(async (part) => {
225
+ let prompt = await Promise.all(messages.map(async (message) => {
226
+ if (message.type === 'user' || message.type === 'assistant') {
227
+ const parts = await Promise.all([message.message].flat().map(async (part) => {
201
228
  if (typeof part === 'string') {
202
229
  return { text: part };
203
230
  }
204
231
  else if (part.type === 'image') {
205
232
  return {
206
- inline_data: {
207
- mime_type: part.mediaType,
233
+ inlineData: {
234
+ mimeType: part.mediaType,
208
235
  data: (await uint8ArrayToBase64(part.data)),
209
236
  },
210
237
  };
@@ -212,9 +239,61 @@ export const ChatGoogleNodeImpl = {
212
239
  else {
213
240
  throw new Error(`Google Vertex AI does not support message parts of type ${part.type}`);
214
241
  }
215
- })),
216
- };
242
+ }));
243
+ if (message.type === 'assistant' && (message.function_calls?.length ?? 0) > 0) {
244
+ if (parts[0].text === '') {
245
+ parts.shift(); // remove empty text part
246
+ }
247
+ for (const call of message.function_calls ?? []) {
248
+ parts.push({
249
+ functionCall: {
250
+ name: call.name,
251
+ args: JSON.parse(call.arguments),
252
+ },
253
+ });
254
+ }
255
+ }
256
+ return {
257
+ role: match(message.type)
258
+ .with('user', () => 'user')
259
+ .with('assistant', () => 'model')
260
+ .exhaustive(),
261
+ parts,
262
+ };
263
+ }
264
+ if (message.type === 'function') {
265
+ return {
266
+ role: 'function',
267
+ parts: [
268
+ {
269
+ functionResponse: {
270
+ name: message.name,
271
+ response: {
272
+ result: typeof message.message === 'string' ? message.message : '',
273
+ },
274
+ },
275
+ },
276
+ ],
277
+ };
278
+ }
279
+ throw new Error(`Google Vertex AI does not support message type ${message.type}`);
217
280
  }));
281
+ // Collapse sequential function responses into a single function response with multiple parts
282
+ prompt = prompt.reduce((acc, message) => {
283
+ const lastMessage = acc.at(-1);
284
+ // Shouldn't be undefined but not sure if this is where the crash is happening...
285
+ if (lastMessage &&
286
+ message.role === 'function' &&
287
+ lastMessage.role === 'function' &&
288
+ lastMessage?.parts &&
289
+ message.parts) {
290
+ lastMessage.parts.push(...message.parts);
291
+ }
292
+ else {
293
+ acc.push(message);
294
+ }
295
+ return acc;
296
+ }, []);
218
297
  let { maxTokens } = data;
219
298
  const tokenizerInfo = {
220
299
  node: context.node,
@@ -223,25 +302,53 @@ export const ChatGoogleNodeImpl = {
223
302
  };
224
303
  // TODO Better token counting for Google models.
225
304
  const tokenCount = await context.tokenizer.getTokenCountForMessages(messages, undefined, tokenizerInfo);
226
- if (googleModels[model] && tokenCount >= googleModels[model].maxTokens) {
227
- throw new Error(`The model ${model} can only handle ${googleModels[model].maxTokens} tokens, but ${tokenCount} were provided in the prompts alone.`);
305
+ if (generativeAiGoogleModels[model] && tokenCount >= generativeAiGoogleModels[model].maxTokens) {
306
+ throw new Error(`The model ${model} can only handle ${generativeAiGoogleModels[model].maxTokens} tokens, but ${tokenCount} were provided in the prompts alone.`);
228
307
  }
229
- if (googleModels[model] && tokenCount + maxTokens > googleModels[model].maxTokens) {
230
- const message = `The model can only handle a maximum of ${googleModels[model].maxTokens} tokens, but the prompts and max tokens together exceed this limit. The max tokens has been reduced to ${googleModels[model].maxTokens - tokenCount}.`;
308
+ if (generativeAiGoogleModels[model] && tokenCount + maxTokens > generativeAiGoogleModels[model].maxTokens) {
309
+ const message = `The model can only handle a maximum of ${generativeAiGoogleModels[model].maxTokens} tokens, but the prompts and max tokens together exceed this limit. The max tokens has been reduced to ${generativeAiGoogleModels[model].maxTokens - tokenCount}.`;
231
310
  addWarning(output, message);
232
- maxTokens = Math.floor((googleModels[model].maxTokens - tokenCount) * 0.95); // reduce max tokens by 5% to be safe, calculation is a little wrong.
311
+ maxTokens = Math.floor((generativeAiGoogleModels[model].maxTokens - tokenCount) * 0.95); // reduce max tokens by 5% to be safe, calculation is a little wrong.
233
312
  }
234
313
  const project = context.getPluginConfig('googleProjectId');
235
314
  const location = context.getPluginConfig('googleRegion');
236
315
  const applicationCredentials = context.getPluginConfig('googleApplicationCredentials');
237
- if (project == null) {
238
- throw new Error('Google Project ID is not defined.');
239
- }
240
- if (location == null) {
241
- throw new Error('Google Region is not defined.');
316
+ const apiKey = context.getPluginConfig('googleApiKey');
317
+ let tools = [];
318
+ if (data.useToolCalling) {
319
+ const gptTools = coerceTypeOptional(inputs['functions'], 'gpt-function[]') ?? [];
320
+ if (gptTools) {
321
+ tools = [
322
+ {
323
+ functionDeclarations: gptTools.map((tool) => ({
324
+ name: tool.name,
325
+ description: tool.description,
326
+ parameters: Object.keys(tool.parameters.properties).length === 0
327
+ ? undefined
328
+ : {
329
+ type: SchemaType.OBJECT,
330
+ properties: mapValues(tool.parameters.properties, (p) => ({
331
+ // gemini doesn't support union property types, it uses openapi style not jsonschema, what a mess
332
+ type: Array.isArray(p.type) ? p.type.filter((t) => t !== 'null')[0] : p.type,
333
+ description: p.description,
334
+ })),
335
+ required: tool.parameters.required || [],
336
+ },
337
+ })),
338
+ },
339
+ ];
340
+ }
242
341
  }
243
- if (applicationCredentials == null) {
244
- throw new Error('Google Application Credentials is not defined.');
342
+ if (!apiKey) {
343
+ if (project == null) {
344
+ throw new Error('Google Project ID or Google API Key is not defined.');
345
+ }
346
+ if (location == null) {
347
+ throw new Error('Google Region or Google API Key is not defined.');
348
+ }
349
+ if (applicationCredentials == null) {
350
+ throw new Error('Google Application Credentials or Google API Key is not defined.');
351
+ }
245
352
  }
246
353
  try {
247
354
  return await retry(async () => {
@@ -249,8 +356,11 @@ export const ChatGoogleNodeImpl = {
249
356
  prompt,
250
357
  model,
251
358
  temperature: useTopP ? undefined : temperature,
252
- top_p: useTopP ? topP : undefined,
253
- max_output_tokens: maxTokens,
359
+ topP: useTopP ? topP : undefined,
360
+ maxOutputTokens: maxTokens,
361
+ systemPrompt,
362
+ topK: undefined,
363
+ tools,
254
364
  };
255
365
  const cacheKey = JSON.stringify(options);
256
366
  if (data.cache) {
@@ -260,24 +370,59 @@ export const ChatGoogleNodeImpl = {
260
370
  }
261
371
  }
262
372
  const startTime = Date.now();
263
- const chunks = streamChatCompletions({
264
- signal: context.signal,
265
- project,
266
- location,
267
- applicationCredentials,
268
- ...options,
269
- });
373
+ let chunks;
374
+ if (data.useToolCalling && !apiKey) {
375
+ throw new Error('Tool calling is only supported when using a generative API key.');
376
+ }
377
+ if (apiKey) {
378
+ chunks = streamGenerativeAi({
379
+ signal: context.signal,
380
+ model,
381
+ prompt,
382
+ maxOutputTokens: maxTokens,
383
+ temperature: useTopP ? undefined : temperature,
384
+ topP: useTopP ? topP : undefined,
385
+ topK: undefined,
386
+ apiKey,
387
+ systemPrompt,
388
+ tools,
389
+ });
390
+ }
391
+ else {
392
+ chunks = streamChatCompletions({
393
+ signal: context.signal,
394
+ model: model,
395
+ prompt,
396
+ max_output_tokens: maxTokens,
397
+ temperature: useTopP ? undefined : temperature,
398
+ top_p: useTopP ? topP : undefined,
399
+ top_k: undefined,
400
+ project: project,
401
+ location: location,
402
+ applicationCredentials: applicationCredentials,
403
+ });
404
+ }
270
405
  const responseParts = [];
406
+ const functionCalls = [];
271
407
  for await (const chunk of chunks) {
272
- if (!chunk.completion) {
273
- // Could be error for some reason 🤷‍♂️ but ignoring has worked for me so far.
274
- continue;
408
+ if (chunk.completion) {
409
+ responseParts.push(chunk.completion);
410
+ output['response'] = {
411
+ type: 'string',
412
+ value: responseParts.join('').trim(),
413
+ };
414
+ }
415
+ if (chunk.function_calls) {
416
+ functionCalls.push(...chunk.function_calls);
417
+ output['function-calls'] = {
418
+ type: 'object[]',
419
+ value: functionCalls.map((fc) => ({
420
+ id: fc.name,
421
+ name: fc.name,
422
+ arguments: fc.args,
423
+ })),
424
+ };
275
425
  }
276
- responseParts.push(chunk.completion);
277
- output['response'] = {
278
- type: 'string',
279
- value: responseParts.join('').trim(),
280
- };
281
426
  context.onPartialOutputs?.(output);
282
427
  }
283
428
  const endTime = Date.now();
@@ -289,7 +434,13 @@ export const ChatGoogleNodeImpl = {
289
434
  type: 'assistant',
290
435
  message: responseParts.join('').trim() ?? '',
291
436
  function_call: undefined,
292
- function_calls: undefined,
437
+ function_calls: functionCalls.length === 0
438
+ ? undefined
439
+ : functionCalls.map((fc) => ({
440
+ id: fc.name,
441
+ name: fc.name,
442
+ arguments: JSON.stringify(fc.args),
443
+ })),
293
444
  },
294
445
  ],
295
446
  };
@@ -297,7 +448,7 @@ export const ChatGoogleNodeImpl = {
297
448
  type: 'chat-message[]',
298
449
  value: messages,
299
450
  };
300
- if (responseParts.length === 0) {
451
+ if (responseParts.length === 0 && functionCalls.length === 0) {
301
452
  throw new Error('No response from Google');
302
453
  }
303
454
  output['requestTokens'] = { type: 'number', value: tokenCount };
@@ -322,6 +473,15 @@ export const ChatGoogleNodeImpl = {
322
473
  signal: context.signal,
323
474
  onFailedAttempt(err) {
324
475
  context.trace(`ChatGoogleNode failed, retrying: ${err.toString()}`);
476
+ const googleError = err;
477
+ if (googleError.status && googleError.status >= 400 && googleError.status < 500) {
478
+ if (googleError.status === 429) {
479
+ context.trace('Google API rate limit exceeded, retrying...');
480
+ }
481
+ else {
482
+ throw new Error(`Google API error: ${googleError.status} ${googleError.message}`);
483
+ }
484
+ }
325
485
  if (context.signal.aborted) {
326
486
  throw new Error('Aborted');
327
487
  }
@@ -329,8 +489,11 @@ export const ChatGoogleNodeImpl = {
329
489
  });
330
490
  }
331
491
  catch (error) {
332
- context.trace(getError(error).stack ?? 'Missing stack');
333
- throw new Error(`Error processing ChatGoogleNode: ${error.message}`);
492
+ const raisedError = getError(error);
493
+ context.trace(raisedError.stack ?? 'Missing stack');
494
+ const err = new Error(`Error processing ChatGoogleNode: ${raisedError.message}`);
495
+ err.cause = raisedError;
496
+ throw err;
334
497
  }
335
498
  },
336
499
  };
@@ -7,26 +7,33 @@ export const googlePlugin = {
7
7
  register(chatGoogleNode);
8
8
  },
9
9
  configSpec: {
10
+ googleApiKey: {
11
+ type: 'secret',
12
+ label: 'Google API Key',
13
+ description: 'The API key for accessing Google generative AI.',
14
+ pullEnvironmentVariable: 'GOOGLE_GENERATIVE_AI_API_KEY',
15
+ helperText: 'You may also set the GOOGLE_GENERATIVE_AI_API_KEY environment variable.',
16
+ },
10
17
  googleProjectId: {
11
18
  type: 'string',
12
- label: 'Google Project ID',
19
+ label: 'Google Project ID (Deprecated)',
13
20
  description: 'The Google project ID.',
14
21
  pullEnvironmentVariable: 'GCP_PROJECT',
15
- helperText: 'You may also set the GCP_PROJECT environment variable.',
22
+ helperText: 'Deprecated, use Google API Key instead. You may also set the GCP_PROJECT environment variable.',
16
23
  },
17
24
  googleRegion: {
18
25
  type: 'string',
19
- label: 'Google Region',
26
+ label: 'Google Region (Deprecated)',
20
27
  description: 'The Google region.',
21
28
  pullEnvironmentVariable: 'GCP_REGION',
22
- helperText: 'You may also set the GCP_REGION environment variable.',
29
+ helperText: 'Deprecated, use Google API Key instead. You may also set the GCP_REGION environment variable.',
23
30
  },
24
31
  googleApplicationCredentials: {
25
32
  type: 'string',
26
- label: 'Google Application Credentials',
33
+ label: 'Google Application Credentials (Deprecated)',
27
34
  description: 'The path with the JSON file that contains your credentials.',
28
35
  pullEnvironmentVariable: 'GOOGLE_APPLICATION_CREDENTIALS',
29
- helperText: 'You may also set the GOOGLE_APPLICATION_CREDENTIALS environment variable. See https://cloud.google.com/vertex-ai/docs/start/client-libraries for more info.',
36
+ helperText: 'Deprecated, use Google API Key instead. You may also set the GOOGLE_APPLICATION_CREDENTIALS environment variable. See https://cloud.google.com/vertex-ai/docs/start/client-libraries for more info.',
30
37
  },
31
38
  },
32
39
  };
@@ -1,10 +1,10 @@
1
- import { isArrayDataValue, arrayizeDataValue, unwrapDataValue, } from '../../../index.js';
1
+ import {} from '../../../index.js';
2
2
  import { openAiModelOptions, } from '../../../utils/openai.js';
3
3
  import { dedent, newId, coerceTypeOptional, getInputOrData } from '../../../utils/index.js';
4
+ import { arrayizeDataValue, unwrapDataValue } from '../../../model/DataValue.js';
4
5
  import { pluginNodeDefinition } from '../../../model/NodeDefinition.js';
5
6
  import { handleOpenAIError } from '../handleOpenaiError.js';
6
7
  import {} from '../../../model/DataValue.js';
7
- import { match } from 'ts-pattern';
8
8
  const POLL_FREQUENCY = 500;
9
9
  export const RunThreadNodeImpl = {
10
10
  create() {
@@ -1,5 +1,5 @@
1
1
  import {} from '../../../index.js';
2
- import { dedent, newId, coerceTypeOptional, getInputOrData, coerceType } from '../../../utils/index.js';
2
+ import { newId, coerceTypeOptional, getInputOrData, coerceType } from '../../../utils/index.js';
3
3
  import { interpolate } from '../../../utils/interpolation.js';
4
4
  import { pluginNodeDefinition } from '../../../model/NodeDefinition.js';
5
5
  import { mapValues } from 'lodash-es';