@alpic80/rivet-core 1.24.0-aidon.5 → 1.24.2-aidon.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/README.md +9 -6
  2. package/dist/cjs/bundle.cjs +1400 -263
  3. package/dist/cjs/bundle.cjs.map +4 -4
  4. package/dist/esm/api/createProcessor.js +2 -0
  5. package/dist/esm/api/streaming.js +44 -2
  6. package/dist/esm/exports.js +1 -0
  7. package/dist/esm/integrations/CodeRunner.js +10 -2
  8. package/dist/esm/integrations/mcp/MCPBase.js +87 -0
  9. package/dist/esm/integrations/mcp/MCPProvider.js +23 -0
  10. package/dist/esm/integrations/mcp/MCPUtils.js +33 -0
  11. package/dist/esm/model/GraphProcessor.js +3 -0
  12. package/dist/esm/model/NodeRegistration.js +0 -1
  13. package/dist/esm/model/Nodes.js +9 -0
  14. package/dist/esm/model/nodes/ChatNodeBase.js +1 -1
  15. package/dist/esm/model/nodes/CodeNode.js +1 -1
  16. package/dist/esm/model/nodes/GetAllDatasetsNode.js +1 -1
  17. package/dist/esm/model/nodes/GraphInputNode.js +2 -0
  18. package/dist/esm/model/nodes/MCPDiscoveryNode.js +210 -0
  19. package/dist/esm/model/nodes/MCPGetPromptNode.js +233 -0
  20. package/dist/esm/model/nodes/MCPToolCallNode.js +261 -0
  21. package/dist/esm/model/nodes/ObjectNode.js +42 -21
  22. package/dist/esm/model/nodes/PromptNode.js +1 -1
  23. package/dist/esm/model/nodes/TextNode.js +13 -2
  24. package/dist/esm/plugins/anthropic/anthropic.js +22 -3
  25. package/dist/esm/plugins/anthropic/nodes/ChatAnthropicNode.js +33 -3
  26. package/dist/esm/plugins/google/google.js +29 -14
  27. package/dist/esm/plugins/google/nodes/ChatGoogleNode.js +70 -5
  28. package/dist/esm/utils/interpolation.js +155 -17
  29. package/dist/esm/utils/openai.js +24 -0
  30. package/dist/types/api/createProcessor.d.ts +3 -2
  31. package/dist/types/api/streaming.d.ts +8 -1
  32. package/dist/types/exports.d.ts +1 -0
  33. package/dist/types/integrations/CodeRunner.d.ts +4 -3
  34. package/dist/types/integrations/mcp/MCPBase.d.ts +18 -0
  35. package/dist/types/integrations/mcp/MCPProvider.d.ts +153 -0
  36. package/dist/types/integrations/mcp/MCPUtils.d.ts +9 -0
  37. package/dist/types/model/GraphProcessor.d.ts +1 -1
  38. package/dist/types/model/Nodes.d.ts +13 -2
  39. package/dist/types/model/ProcessContext.d.ts +5 -1
  40. package/dist/types/model/Project.d.ts +2 -0
  41. package/dist/types/model/nodes/GetAllDatasetsNode.d.ts +2 -2
  42. package/dist/types/model/nodes/MCPDiscoveryNode.d.ts +9 -0
  43. package/dist/types/model/nodes/MCPGetPromptNode.d.ts +23 -0
  44. package/dist/types/model/nodes/MCPToolCallNode.d.ts +26 -0
  45. package/dist/types/model/nodes/ObjectNode.d.ts +3 -2
  46. package/dist/types/model/nodes/TextNode.d.ts +2 -1
  47. package/dist/types/plugins/anthropic/anthropic.d.ts +21 -3
  48. package/dist/types/plugins/anthropic/nodes/ChatAnthropicNode.d.ts +5 -0
  49. package/dist/types/plugins/google/google.d.ts +12 -2
  50. package/dist/types/plugins/google/nodes/ChatGoogleNode.d.ts +7 -0
  51. package/dist/types/utils/interpolation.d.ts +6 -1
  52. package/dist/types/utils/openai.d.ts +24 -0
  53. package/package.json +3 -3
package/dist/esm/plugins/anthropic/anthropic.js:

@@ -80,12 +80,28 @@ export const anthropicModels = {
     },
     displayName: 'Claude 3.7 Sonnet',
   },
+  'claude-sonnet-4-20250514': {
+    maxTokens: 200_000,
+    cost: {
+      prompt: 3e-6,
+      completion: 3.75e-6,
+    },
+    displayName: 'Claude Sonnet 4',
+  },
+  'claude-opus-4-20250514': {
+    maxTokens: 200_000,
+    cost: {
+      prompt: 15e-6,
+      completion: 18.75e-6,
+    },
+    displayName: 'Claude Opus 4',
+  },
 };
 export const anthropicModelOptions = Object.entries(anthropicModels).map(([id, { displayName }]) => ({
   value: id,
   label: displayName,
 }));
-export async function* streamChatCompletions({ apiEndpoint, apiKey, signal, ...rest }) {
+export async function* streamChatCompletions({ apiEndpoint, apiKey, signal, additionalHeaders, ...rest }) {
   const defaultSignal = new AbortController().signal;
   const response = await fetchEventSource(`${apiEndpoint}/completions`, {
     method: 'POST',
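Note: the `cost` fields above are USD per token, so the new entries price Claude Sonnet 4 prompts at $3 per million tokens. A minimal sketch of estimating a call's cost from this table (the helper name is hypothetical; it assumes token counts come from the API's usage info):

    // Hypothetical helper: estimate call cost in USD from the model table,
    // assuming cost.prompt / cost.completion are USD per token.
    function estimateCostUsd(modelId, promptTokens, completionTokens) {
      const info = anthropicModels[modelId];
      if (!info) throw new Error(`Unknown model: ${modelId}`);
      return promptTokens * info.cost.prompt + completionTokens * info.cost.completion;
    }

    // 10,000 prompt + 1,000 completion tokens on Claude Sonnet 4:
    // 10_000 * 3e-6 + 1_000 * 3.75e-6 = $0.03375
    estimateCostUsd('claude-sonnet-4-20250514', 10_000, 1_000);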
@@ -94,6 +110,7 @@ export async function* streamChatCompletions({ apiEndpoint, apiKey, signal, ...r
       'x-api-key': apiKey,
       'anthropic-version': '2023-06-01',
       'anthropic-dangerous-direct-browser-access': 'true',
+      ...additionalHeaders,
     },
     body: JSON.stringify({
       ...rest,
@@ -125,7 +142,7 @@ export async function* streamChatCompletions({ apiEndpoint, apiKey, signal, ...r
     throw new AnthropicError(`No chunks received. Response: ${JSON.stringify(responseJson)}`, response, responseJson);
   }
 }
-export async function callMessageApi({ apiEndpoint, apiKey, signal, tools, beta, ...rest }) {
+export async function callMessageApi({ apiEndpoint, apiKey, signal, tools, beta, additionalHeaders, ...rest }) {
   const defaultSignal = new AbortController().signal;
   const response = await fetch(`${apiEndpoint}/messages`, {
     method: 'POST',
@@ -135,6 +152,7 @@ export async function callMessageApi({ apiEndpoint, apiKey, signal, tools, beta,
       'anthropic-version': '2023-06-01',
       'anthropic-dangerous-direct-browser-access': 'true',
       ...(beta ? { 'anthropic-beta': beta } : {}),
+      ...additionalHeaders,
     },
     body: JSON.stringify({
       ...rest,
@@ -149,7 +167,7 @@ export async function callMessageApi({ apiEndpoint, apiKey, signal, tools, beta,
   }
   return responseJson;
 }
-export async function* streamMessageApi({ apiEndpoint, apiKey, signal, beta, ...rest }) {
+export async function* streamMessageApi({ apiEndpoint, apiKey, signal, beta, additionalHeaders, ...rest }) {
   // Use the Messages API for Claude 3 models
   const defaultSignal = new AbortController().signal;
   const response = await fetchEventSource(`${apiEndpoint}/messages`, {
@@ -160,6 +178,7 @@ export async function* streamMessageApi({ apiEndpoint, apiKey, signal, beta, ...
       'anthropic-version': '2023-06-01',
       'anthropic-dangerous-direct-browser-access': 'true',
       ...(beta ? { 'anthropic-beta': beta } : {}),
+      ...additionalHeaders,
     },
     body: JSON.stringify({
       ...rest,
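Note: all three Anthropic helpers now take an `additionalHeaders` object that is spread into the request headers last, so caller-supplied entries win on key collisions. A minimal sketch of calling one of them with a custom header (the header name and values are illustrative):

    // Sketch: additionalHeaders is merged after the defaults, so these
    // entries override 'anthropic-version' etc. on collisions.
    for await (const chunk of streamMessageApi({
      apiEndpoint: 'https://api.anthropic.com/v1',
      apiKey: process.env.ANTHROPIC_API_KEY,
      model: 'claude-sonnet-4-20250514',
      max_tokens: 1024,
      messages: [{ role: 'user', content: 'Hello' }],
      additionalHeaders: { 'x-proxy-tenant': 'team-a' }, // illustrative header
    })) {
      // ...consume streamed events
    }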
package/dist/esm/plugins/anthropic/nodes/ChatAnthropicNode.js:

@@ -12,7 +12,7 @@ import { getScalarTypeOf, isArrayDataValue } from '../../../model/DataValue.js';
 import { assertNever } from '../../../utils/assertNever.js';
 import { isNotNull } from '../../../utils/genericUtilFunctions.js';
 import { uint8ArrayToBase64 } from '../../../utils/base64.js';
-import { getInputOrData } from '../../../utils/inputs.js';
+import { getInputOrData, cleanHeaders } from '../../../utils/inputs.js';
 // Temporary
 const cache = new Map();
 export const ChatAnthropicNodeImpl = {
@@ -27,7 +27,7 @@ export const ChatAnthropicNodeImpl = {
       width: 275,
     },
     data: {
-      model: 'claude-3-7-sonnet-latest',
+      model: 'claude-sonnet-4-20250514',
       useModelInput: false,
       temperature: 0.5,
       useTemperatureInput: false,
@@ -120,6 +120,14 @@ export const ChatAnthropicNodeImpl = {
         coerced: false,
       });
     }
+    if (data.useHeadersInput) {
+      inputs.push({
+        dataType: 'object',
+        id: 'headers',
+        title: 'Headers',
+        description: 'Additional headers to send to the API.',
+      });
+    }
     return inputs;
   },
   getOutputDefinitions(data) {
@@ -267,6 +275,14 @@ export const ChatAnthropicNodeImpl = {
           useInputToggleDataKey: 'useOverrideModelInput',
           helperMessage: 'Overrides the AI model used for the chat node to this value.',
         },
+        {
+          type: 'keyValuePair',
+          label: 'Headers',
+          dataKey: 'headers',
+          useInputToggleDataKey: 'useHeadersInput',
+          keyPlaceholder: 'Header',
+          helperMessage: 'Additional headers to send to the API.',
+        },
       ],
     },
   ];
@@ -343,6 +359,18 @@ export const ChatAnthropicNodeImpl = {
       addWarning(output, message);
       maxTokens = Math.floor((modelInfo.maxTokens - tokenCountEstimate) * 0.95); // reduce max tokens by 5% to be safe, calculation is a little wrong.
     }
+    const headersFromData = (data.headers ?? []).reduce((acc, header) => {
+      acc[header.key] = header.value;
+      return acc;
+    }, {});
+    const additionalHeaders = data.useHeadersInput
+      ? coerceTypeOptional(inputs['headers'], 'object') ??
+        headersFromData
+      : headersFromData;
+    const allAdditionalHeaders = cleanHeaders({
+      ...context.settings.chatNodeHeaders,
+      ...additionalHeaders,
+    });
     try {
       return await retry(async () => {
        const completionOptions = {
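Note: the merge order here puts node-level headers after `context.settings.chatNodeHeaders`, so per-node values override global settings, and the merged result is passed through `cleanHeaders`. That helper's implementation is not part of this diff; presumably it drops blank entries, roughly like this hypothetical sketch:

    // Hypothetical sketch of utils/inputs.js cleanHeaders — the real
    // implementation is not shown in this diff.
    function cleanHeaders(headers) {
      return Object.fromEntries(
        Object.entries(headers).filter(([key, value]) => key && value != null && value !== ''),
      );
    }

    // Per-node headers win over settings-level headers on collisions:
    cleanHeaders({ ...{ 'x-env': 'prod' }, ...{ 'x-env': 'staging' } }); // => { 'x-env': 'staging' }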
@@ -365,7 +393,7 @@ export const ChatAnthropicNodeImpl = {
             ? tools.map((tool) => ({ name: tool.name, description: tool.description, input_schema: tool.parameters }))
             : undefined,
         };
-        const useMessageApi = model.startsWith('claude-3');
+        const useMessageApi = model.startsWith('claude-3') || model.startsWith('claude-sonnet') || model.startsWith('claude-opus');
         const cacheKey = JSON.stringify(useMessageApi ? messageOptions : completionOptions);
         if (data.cache) {
           const cached = cache.get(cacheKey);
@@ -385,6 +413,7 @@ export const ChatAnthropicNodeImpl = {
           apiKey: apiKey ?? '',
           signal: context.signal,
           beta: 'prompt-caching-2024-07-31',
+          additionalHeaders: allAdditionalHeaders,
           ...messageOptions,
         });
         // Process the response chunks and update the output
@@ -516,6 +545,7 @@ export const ChatAnthropicNodeImpl = {
           apiEndpoint,
           apiKey: apiKey ?? '',
           signal: context.signal,
+          additionalHeaders: allAdditionalHeaders,
           ...completionOptions,
         });
         // Process the response chunks and update the output
package/dist/esm/plugins/google/google.js:

@@ -1,4 +1,4 @@
-import {} from '@google/generative-ai';
+import {} from '@google/genai';
 export const googleModelsDeprecated = {
   'gemini-pro': {
     maxTokens: 32760,
@@ -18,12 +18,20 @@ export const googleModelsDeprecated = {
   },
 };
 export const generativeAiGoogleModels = {
-  'gemini-2.0-flash-001': {
+  'gemini-2.5-flash-preview-04-17': {
     maxTokens: 1048576,
     cost: {
       prompt: 0.15 / 1000,
       completion: 0.6 / 1000,
     },
+    displayName: 'Gemini 2.5 Flash Preview',
+  },
+  'gemini-2.0-flash-001': {
+    maxTokens: 1048576,
+    cost: {
+      prompt: 0.1 / 1000,
+      completion: 0.4 / 1000,
+    },
     displayName: 'Gemini 2.0 Flash',
   },
   'gemini-2.0-pro-exp-02-05': {
@@ -99,31 +107,38 @@ export const generativeAiOptions = Object.entries(generativeAiGoogleModels).map(
   value: id,
   label: displayName,
 }));
-export async function* streamGenerativeAi({ apiKey, model, systemPrompt, prompt, maxOutputTokens, temperature, topP, topK, signal, tools, }) {
-  const { GoogleGenerativeAI } = await import('@google/generative-ai');
-  const genAi = new GoogleGenerativeAI(apiKey);
-  const genaiModel = genAi.getGenerativeModel({
+export async function* streamGenerativeAi({ apiKey, model, systemPrompt, prompt, maxOutputTokens, temperature, topP, topK, signal, tools, thinkingBudget, additionalHeaders, }) {
+  const { GoogleGenAI } = await import('@google/genai');
+  const genAi = new GoogleGenAI({ apiKey });
+  const result = await genAi.models.generateContentStream({
     model,
-    systemInstruction: systemPrompt,
-    generationConfig: {
+    contents: prompt,
+    config: {
+      systemInstruction: systemPrompt,
       maxOutputTokens,
       temperature,
       topP,
       topK,
+      tools,
+      abortSignal: signal,
+      thinkingConfig: {
+        thinkingBudget,
+      },
+      httpOptions: {
+        headers: {
+          ...additionalHeaders,
+        },
+      },
     },
-    tools,
   });
-  const result = await genaiModel.generateContentStream({
-    contents: prompt,
-  }, { signal });
-  for await (const chunk of result.stream) {
+  for await (const chunk of result) {
     const outChunk = {
       completion: undefined,
       finish_reason: undefined,
       function_calls: undefined,
       model,
     };
-    const functionCalls = chunk.functionCalls();
+    const functionCalls = chunk.functionCalls;
     if (functionCalls) {
       outChunk.function_calls = functionCalls;
     }
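Note: the migration replaces the two-step `getGenerativeModel(...)` + `generateContentStream(...)` flow of `@google/generative-ai` with a single call on the new SDK's `models` API; chunks are iterated directly, and `functionCalls` is a property rather than a method. A minimal standalone sketch of the new call shape, consistent with the diff above (model name and config values are illustrative):

    import { GoogleGenAI } from '@google/genai';

    const genAi = new GoogleGenAI({ apiKey: process.env.GOOGLE_API_KEY });
    const stream = await genAi.models.generateContentStream({
      model: 'gemini-2.5-flash-preview-04-17',
      contents: 'Summarize the change in one sentence.',
      config: {
        maxOutputTokens: 256,
        thinkingConfig: { thinkingBudget: 0 }, // 0 disables thinking; omit for automatic
      },
    });
    for await (const chunk of stream) {
      process.stdout.write(chunk.text ?? ''); // .text is a property on the new SDK
    }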
package/dist/esm/plugins/google/nodes/ChatGoogleNode.js:

@@ -10,8 +10,8 @@ import { getError } from '../../../utils/errors.js';
 import { uint8ArrayToBase64 } from '../../../utils/base64.js';
 import { pluginNodeDefinition } from '../../../model/NodeDefinition.js';
 import { getScalarTypeOf, isArrayDataValue } from '../../../model/DataValue.js';
-import { getInputOrData } from '../../../utils/inputs.js';
-import { SchemaType, } from '@google/generative-ai';
+import { getInputOrData, cleanHeaders } from '../../../utils/inputs.js';
+import { Type } from '@google/genai';
 import { mapValues } from 'lodash-es';
 // Temporary
 const cache = new Map();
@@ -27,7 +27,7 @@ export const ChatGoogleNodeImpl = {
       width: 275,
     },
     data: {
-      model: 'gemini-2.0-flash-001',
+      model: 'gemini-2.5-flash-preview-04-17',
       useModelInput: false,
       temperature: 0.5,
       useTemperatureInput: false,
@@ -42,6 +42,8 @@ export const ChatGoogleNodeImpl = {
       cache: false,
       useAsGraphPartialOutput: true,
       useToolCalling: false,
+      thinkingBudget: undefined,
+      useThinkingBudgetInput: false,
     },
   };
   return chartNode;
@@ -99,11 +101,27 @@ export const ChatGoogleNodeImpl = {
         description: 'Tools available for the model to call.',
       });
     }
+    if (data.useThinkingBudgetInput) {
+      inputs.push({
+        dataType: 'number',
+        id: 'thinkingBudget',
+        title: 'Thinking Budget',
+        description: 'The token budget for the model to think before responding.',
+      });
+    }
     inputs.push({
       dataType: ['chat-message', 'chat-message[]'],
       id: 'prompt',
       title: 'Prompt',
     });
+    if (data.useHeadersInput) {
+      inputs.push({
+        dataType: 'object',
+        id: 'headers',
+        title: 'Headers',
+        description: 'Additional headers to send to the API.',
+      });
+    }
     return inputs;
   },
   getOutputDefinitions(data) {
@@ -142,6 +160,7 @@ export const ChatGoogleNodeImpl = {
       ? `Top P: ${data.useTopPInput ? '(Using Input)' : data.top_p}`
       : `Temperature: ${data.useTemperatureInput ? '(Using Input)' : data.temperature}`}
 Max Tokens: ${data.maxTokens}
+Thinking Budget: ${data.thinkingBudget ?? 'Automatic'}
 `;
   },
   getEditors() {
@@ -186,6 +205,17 @@ export const ChatGoogleNodeImpl = {
         max: Number.MAX_SAFE_INTEGER,
         step: 1,
       },
+      {
+        type: 'number',
+        label: 'Thinking Budget',
+        dataKey: 'thinkingBudget',
+        allowEmpty: true,
+        step: 1,
+        min: 0,
+        max: Number.MAX_SAFE_INTEGER,
+        useInputToggleDataKey: 'useThinkingBudgetInput',
+        helperMessage: 'The token budget for the model to think before responding. Leave blank for automatic budget.',
+      },
       {
         type: 'toggle',
         label: 'Enable Tool Calling',
@@ -201,6 +231,14 @@ export const ChatGoogleNodeImpl = {
         label: 'Use for subgraph partial output',
         dataKey: 'useAsGraphPartialOutput',
       },
+      {
+        type: 'keyValuePair',
+        label: 'Headers',
+        dataKey: 'headers',
+        useInputToggleDataKey: 'useHeadersInput',
+        keyPlaceholder: 'Header',
+        helperMessage: 'Additional headers to send to the API.',
+      },
     ];
   },
   getUIData() {
@@ -221,6 +259,7 @@ export const ChatGoogleNodeImpl = {
     const temperature = getInputOrData(data, inputs, 'temperature', 'number');
     const topP = getInputOrData(data, inputs, 'top_p', 'number');
     const useTopP = getInputOrData(data, inputs, 'useTopP', 'boolean');
+    const thinkingBudget = getInputOrData(data, inputs, 'thinkingBudget', 'number');
     const { messages } = getChatGoogleNodeMessages(inputs);
     let prompt = await Promise.all(messages.map(async (message) => {
       if (message.type === 'user' || message.type === 'assistant') {
@@ -326,7 +365,7 @@ export const ChatGoogleNodeImpl = {
         parameters: Object.keys(tool.parameters.properties).length === 0
           ? undefined
           : {
-            type: SchemaType.OBJECT,
+            type: Type.OBJECT,
             properties: mapValues(tool.parameters.properties, (p) => ({
               // gemini doesn't support union property types, it uses openapi style not jsonschema, what a mess
               type: Array.isArray(p.type) ? p.type.filter((t) => t !== 'null')[0] : p.type,
@@ -350,6 +389,18 @@ export const ChatGoogleNodeImpl = {
         throw new Error('Google Application Credentials or Google API Key is not defined.');
       }
     }
+    const headersFromData = (data.headers ?? []).reduce((acc, header) => {
+      acc[header.key] = header.value;
+      return acc;
+    }, {});
+    const additionalHeaders = data.useHeadersInput
+      ? coerceTypeOptional(inputs['headers'], 'object') ??
+        headersFromData
+      : headersFromData;
+    const allAdditionalHeaders = cleanHeaders({
+      ...context.settings.chatNodeHeaders,
+      ...additionalHeaders,
+    });
     try {
       return await retry(async () => {
         const options = {
@@ -361,6 +412,8 @@ export const ChatGoogleNodeImpl = {
           systemPrompt,
           topK: undefined,
           tools,
+          thinkingBudget,
+          additionalHeaders: allAdditionalHeaders,
         };
         const cacheKey = JSON.stringify(options);
         if (data.cache) {
@@ -386,6 +439,8 @@ export const ChatGoogleNodeImpl = {
             apiKey,
             systemPrompt,
             tools,
+            thinkingBudget,
+            additionalHeaders: allAdditionalHeaders,
           });
         }
         else {
@@ -404,6 +459,14 @@ export const ChatGoogleNodeImpl = {
         }
         const responseParts = [];
         const functionCalls = [];
+        let throttleLastCalledTime = Date.now();
+        const onPartialOutput = (output) => {
+          const now = Date.now();
+          if (now - throttleLastCalledTime > (context.settings.throttleChatNode ?? 100)) {
+            context.onPartialOutputs?.(output);
+            throttleLastCalledTime = now;
+          }
+        };
         for await (const chunk of chunks) {
           if (chunk.completion) {
             responseParts.push(chunk.completion);
@@ -423,8 +486,10 @@ export const ChatGoogleNodeImpl = {
               })),
             };
           }
-          context.onPartialOutputs?.(output);
+          onPartialOutput?.(output);
         }
+        // Call one last time manually to ensure the last output is sent
+        context.onPartialOutputs?.(output);
         const endTime = Date.now();
         output['all-messages'] = {
           type: 'chat-message[]',
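Note: partial outputs are now rate-limited to one emission per `context.settings.throttleChatNode` milliseconds (default 100), with a final unthrottled call after the loop so the last chunk is never dropped. The same throttle-with-trailing-flush pattern in isolation (a generic sketch, not package code):

    // Generic sketch of the throttle pattern used above.
    function makeThrottled(fn, intervalMs = 100) {
      let last = Date.now();
      return (value) => {
        const now = Date.now();
        if (now - last > intervalMs) {
          fn(value);
          last = now;
        }
      };
    }

    const emit = makeThrottled((v) => console.log(v), 100);
    for (const v of ['a', 'ab', 'abc']) emit(v); // may skip intermediate values
    console.log('abc'); // trailing flush: always emit the final value unconditionally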
package/dist/esm/utils/interpolation.js:

@@ -1,6 +1,9 @@
 import { dedent } from './misc.js';
-export const TOKEN_MATCH_REGEX = /\{\{(?!\{)([^{}\s][^{}]*[^{}\s]|[^{}\s])\}\}(?!\})/g;
-export const ESCAPED_TOKEN_REGEX = /\{{3}([^{}]+)\}{3}/g;
+import { get as lodashGet } from 'lodash-es';
+// Simpler regex allowing spaces, relies on trim() later
+export const TOKEN_MATCH_REGEX = /\{\{([^}]+?)\}\}/g;
+export const ESCAPED_TOKEN_REGEX = /\{\{\{([^}]+?)\}\}\}/g;
+export const ESCAPED_ESCAPED_TOKEN_REGEX = /\\\{\\\{([^}]+?)\\\}\\\}/g;
 const processingFunctions = {
   indent: (input, spaces = 0) => {
     const indent = ' '.repeat(spaces);
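Note: the old `TOKEN_MATCH_REGEX` rejected tokens with leading or trailing whitespace in the body; the new one matches any non-empty, brace-free body and defers whitespace handling to `trim()`. A quick illustrative comparison:

    const NEW_TOKEN_REGEX = /\{\{([^}]+?)\}\}/g;

    '{{ user.name }} and {{count}}'.match(NEW_TOKEN_REGEX);
    // => ['{{ user.name }}', '{{count}}'] — the spaced token now matches too;
    // the old regex would only have matched '{{count}}'.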
@@ -62,7 +65,99 @@ const processingFunctions = {
     return lines.join('\n');
   },
 };
-// Parse processing instructions like "indent 2" or "quote" into function name and parameter
+// Helper function to check and potentially unwrap a DataValue-like object
+export function unwrapPotentialDataValue(value) {
+  if (typeof value === 'object' &&
+    value !== null &&
+    typeof value.type === 'string' &&
+    Object.prototype.hasOwnProperty.call(value, 'value') // More robust check for 'value' property
+  ) {
+    return value.value;
+  }
+  return value;
+}
+// Renamed from resolveExpression, now exported and returns raw value
+export function resolveExpressionRawValue(source, expression, sourceType) {
+  if (!source) {
+    return undefined;
+  }
+  // Regex to capture the main key and the optional path starting with . or [
+  // Allows for spaces around the key, path, ., and []
+  // Key: Group 1; Path: Group 2
+  const match = expression.trim().match(/^([^[.\s]+)\s*(.*)$/);
+  let key;
+  let path;
+  // Check if match is successful AND group 1 (the key) was captured
+  if (match && typeof match[1] === 'string') {
+    key = match[1];
+    const rawPath = match[2]; // Group 2 (the path part, might start with . or [)
+    // Clean and assign path only if rawPath is not empty
+    if (rawPath) {
+      // Clean path: Trim whitespace, then remove spaces around separators '.', '[', ']'
+      // Preserve the leading '.' or '[' as lodashGet handles them.
+      path = rawPath.trim().replace(/\s*(\.|\[|\])\s*/g, '$1');
+    }
+    else {
+      path = undefined;
+    }
+  }
+  else {
+    // If match failed or group 1 wasn't captured (fallback)
+    // Assume the entire expression is the key and there's no path
+    key = expression.trim();
+    path = undefined;
+  }
+  if (!key) {
+    // If key is empty after trimming, it's invalid.
+    return undefined;
+  }
+  const topLevelValue = source[key];
+  if (topLevelValue === undefined) {
+    return undefined; // Key not found in source
+  }
+  // Get the base value by potentially unwrapping the top-level value using the shared helper
+  const baseValue = unwrapPotentialDataValue(topLevelValue);
+  // If there's a path, try to resolve it using lodashGet on the baseValue
+  let finalValue;
+  if (path) {
+    try {
+      finalValue = lodashGet(baseValue, path);
+      // IMPORTANT: After getting a potentially nested value via path,
+      // we might *still* have a DataValue (if the context stores them nested).
+      // Unwrap again to be safe.
+      finalValue = unwrapPotentialDataValue(finalValue);
+    }
+    catch (error) {
+      console.warn(`Error accessing path "${path}" in ${sourceType} value for key "${key}":`, error);
+      return undefined; // Error during path access
+    }
+  }
+  else {
+    finalValue = baseValue; // No path, use the (potentially unwrapped) base value
+  }
+  // Return the raw final value
+  return finalValue;
+}
+// New function: Resolves and converts to string format suitable for TextNode
+export function resolveExpressionToString(source, expression, sourceType) {
+  const finalValue = resolveExpressionRawValue(source, expression, sourceType);
+  if (finalValue === undefined) {
+    return undefined;
+  }
+  // Convert the final value to a string for TextNode context
+  if (typeof finalValue === 'object' && finalValue !== null) {
+    try {
+      return JSON.stringify(finalValue);
+    }
+    catch (error) {
+      console.warn(`Error stringifying object/array in ${sourceType} for expression "${expression}":`, error);
+      return '[object Object]'; // Fallback
+    }
+  }
+  // For primitives
+  return String(finalValue);
+}
+// Helper function to parse processing instructions like "indent 2" or "quote" into function name and parameter
 function parseProcessing(instruction) {
   const parts = instruction.trim().split(/\s+/);
   return {
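Note: given the parsing above, an expression splits into a top-level key plus an optional lodash-style path, and DataValue-style wrappers are unwrapped both before and after path resolution. Two illustrative calls and their expected results (source objects are made up for the example):

    const source = {
      user: { type: 'object', value: { name: 'Ada' } }, // DataValue-style wrapper
    };

    // No path: the DataValue wrapper is unwrapped and returned raw.
    resolveExpressionRawValue(source, 'user', 'context'); // => { name: 'Ada' }

    // Bracket path: key 'tags' plus path '[1]', resolved via lodash get.
    resolveExpressionRawValue({ tags: ['math', 'logic'] }, 'tags[1]', 'context'); // => 'logic'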
@@ -83,29 +178,72 @@ function applyProcessing(value, processingChain) {
     return processingFunc(result, param);
   }, value);
 }
-export function interpolate(baseString, values) {
-  return baseString
-    .replace(TOKEN_MATCH_REGEX, (_m, p1) => {
-      const [token, ...processing] = p1.split('|');
-      const value = values[token.trim()];
-      if (value === undefined)
+export function interpolate(template, variables, graphInputValues, contextValues) {
+  return template
+    .replace(ESCAPED_TOKEN_REGEX, (_match, expression) => {
+      // Replace with \{\{expression\}\} to escape
+      return `\\{\\{${expression}\\}\\}`; // Escaped token
+    })
+    .replace(/\{\{((?:@graphInputs|@context)\..*?|[^}]+?)\}\}/g, (_match, expressionWithMaybeProcessing) => {
+      const parts = expressionWithMaybeProcessing.split('|').map((s) => s.trim());
+      const expression = parts[0]; // The variable name or path, e.g., @context.foo.bar or myVar
+      const processingChain = parts.slice(1).join('|'); // e.g., indent 2 | quote
+      let resolvedValue;
+      if (expression.startsWith('@graphInputs.')) {
+        // Use the new string-converting function
+        resolvedValue = resolveExpressionToString(graphInputValues, expression.substring('@graphInputs.'.length), 'graphInputs');
+      }
+      else if (expression.startsWith('@context.')) {
+        // Use the new string-converting function
+        resolvedValue = resolveExpressionToString(contextValues, expression.substring('@context.'.length), 'context');
+      }
+      else {
+        const simpleVar = variables[expression];
+        if (simpleVar !== undefined) {
+          // Simple variables might be DataValue or raw strings
+          resolvedValue = String(unwrapPotentialDataValue(simpleVar) ?? '');
+        }
+        else {
+          resolvedValue = undefined; // Variable not found
+        }
+      }
+      if (resolvedValue === undefined) {
+        // Return an empty string if the variable is not found or resolves to undefined
+        console.warn(`Interpolation variable or path "${expression}" not found or resolved to undefined.`);
         return '';
-      if (processing.length > 0) {
-        return applyProcessing(value, p1);
       }
-      return value;
+      // Apply processing if any instructions exist
+      if (processingChain) {
+        return applyProcessing(resolvedValue, processingChain);
+      }
+      return resolvedValue;
     })
-    .replace(ESCAPED_TOKEN_REGEX, (_m, p1) => {
-      return `{{${p1}}}`;
+    .replace(ESCAPED_ESCAPED_TOKEN_REGEX, (_match, expression) => {
+      // Replace with {{expression}} to unescape
+      return `{{${expression}}}`; // Unescaped token
     });
 }
 // Extract all unique variable names from a template string
+// Ignores variables starting with @graphInputs. or @context., as they are treated as special references.
 export function extractInterpolationVariables(template) {
-  const matches = template.matchAll(TOKEN_MATCH_REGEX);
+  const matches = template
+    .replace(ESCAPED_TOKEN_REGEX, (_match, content) => {
+      // Replace escaped tokens with the escaped escaped version so they're not matched
+      return `\\{\\{${content}\\}\\}`;
+    })
+    .matchAll(TOKEN_MATCH_REGEX);
   const variables = new Set();
   for (const match of matches) {
-    const [token] = match[1].split('|');
-    variables.add(token.trim());
+    if (match[1]) {
+      const [tokenPart] = match[1].split('|');
+      if (tokenPart) {
+        const token = tokenPart.trim();
+        // Filter out special prefixes
+        if (!token.startsWith('@graphInputs.') && !token.startsWith('@context.')) {
+          variables.add(token);
+        }
+      }
+    }
   }
   return Array.from(variables);
 }
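Note: `interpolate` now takes separate sources for plain variables, `@graphInputs.` references, and `@context.` references, and uses the triple-brace escape round-trip to leave literal tokens intact. Expected behavior on a small template (illustrative values):

    const graphInputs = { topic: { type: 'string', value: 'graphs' } }; // DataValue-style

    interpolate(
      'Hi {{name}}, topic: {{@graphInputs.topic}}, raw: {{{keep}}}',
      { name: 'Ada' },
      graphInputs,
      undefined, // no @context values in this sketch
    );
    // => 'Hi Ada, topic: graphs, raw: {{keep}}'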
package/dist/esm/utils/openai.js:

@@ -188,6 +188,30 @@ export const openaiModels = {
     },
     displayName: 'GPT-4o Audio (Preview)',
   },
+  'gpt-4.1': {
+    maxTokens: 1_047_576,
+    cost: {
+      prompt: 2e-6,
+      completion: 8e-6,
+    },
+    displayName: 'GPT-4.1',
+  },
+  o3: {
+    maxTokens: 200_000,
+    cost: {
+      prompt: 10e-6,
+      completion: 40e-6,
+    },
+    displayName: 'o3',
+  },
+  'o4-mini': {
+    maxTokens: 200_000,
+    cost: {
+      prompt: 1.1e-6,
+      completion: 4.4e-6,
+    },
+    displayName: 'o4-mini',
+  },
   'local-model': {
     maxTokens: Number.MAX_SAFE_INTEGER,
     cost: {
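Note: the new entries follow the same shape as the rest of the table, so existing consumers keep working. A sketch of how `maxTokens` might be used to cap a request's completion budget, mirroring the 5% safety margin seen in ChatAnthropicNode above (the helper name is hypothetical):

    // Hypothetical helper: cap requested completion tokens to what the
    // model's context window can still hold, with a 5% safety margin.
    function clampMaxTokens(modelId, promptTokenEstimate, requested) {
      const { maxTokens } = openaiModels[modelId];
      const remaining = Math.floor((maxTokens - promptTokenEstimate) * 0.95);
      return Math.min(requested, remaining);
    }

    // (200_000 - 180_000) * 0.95 = 19_000, which is less than the 32_000 asked for:
    clampMaxTokens('o4-mini', 180_000, 32_000); // => 19000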
package/dist/types/api/createProcessor.d.ts:

@@ -1,5 +1,5 @@
 import type { PascalCase } from 'type-fest';
-import { type AttachedData, type AudioProvider, type DataValue, type DatasetProvider, type ExternalFunction, type NativeApi, type NodeRegistration, type ProcessContext, type ProcessEvents, type Project, type RivetEventStreamFilterSpec, type Settings } from '../index.js';
+import { type AttachedData, type AudioProvider, type DataValue, type DatasetProvider, type ExternalFunction, type MCPProvider, type NativeApi, type NodeRegistration, type ProcessContext, type ProcessEvents, type Project, type RivetEventStreamFilterSpec, type Settings } from '../index.js';
 import { GraphProcessor } from '../model/GraphProcessor.js';
 import type { Tokenizer } from '../integrations/Tokenizer.js';
 import { type LooseDataValue } from './looseDataValue.js';
@@ -12,6 +12,7 @@ export type RunGraphOptions = {
     nativeApi?: NativeApi;
     datasetProvider?: DatasetProvider;
     audioProvider?: AudioProvider;
+    mcpProvider?: MCPProvider;
     externalFunctions?: {
         [key: string]: ExternalFunction;
     };
@@ -19,7 +20,7 @@ export type RunGraphOptions = {
         [key: string]: (data: DataValue | undefined) => void;
     };
     abortSignal?: AbortSignal;
-    registry?: NodeRegistration;
+    registry?: NodeRegistration<any, any>;
     includeTrace?: boolean;
     getChatNodeEndpoint?: ProcessContext['getChatNodeEndpoint'];
     tokenizer?: Tokenizer;
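Note: with the new optional field, callers can hand an MCP provider to graph runs alongside the other providers; the full interface is added in this release as MCPProvider.d.ts (+153 lines). A minimal sketch of passing it through `RunGraphOptions` (the provider object here is hypothetical, and only fields visible in this diff are shown):

    // Sketch: RunGraphOptions now accepts an optional mcpProvider, which the
    // new MCP nodes (MCPDiscoveryNode, MCPToolCallNode, ...) use at runtime.
    const controller = new AbortController();
    const options = {
      mcpProvider: myMcpProvider, // hypothetical object implementing MCPProvider
      abortSignal: controller.signal,
      includeTrace: true,
      // ...plus the usual graph/input options accepted by createProcessor
    };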