@alpic80/rivet-core 1.24.0-aidon.5 → 1.24.2-aidon.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/README.md +9 -6
  2. package/dist/cjs/bundle.cjs +1534 -278
  3. package/dist/cjs/bundle.cjs.map +4 -4
  4. package/dist/esm/api/createProcessor.js +2 -0
  5. package/dist/esm/api/streaming.js +48 -2
  6. package/dist/esm/exports.js +1 -0
  7. package/dist/esm/integrations/CodeRunner.js +10 -2
  8. package/dist/esm/integrations/mcp/MCPBase.js +100 -0
  9. package/dist/esm/integrations/mcp/MCPProvider.js +23 -0
  10. package/dist/esm/integrations/mcp/MCPUtils.js +33 -0
  11. package/dist/esm/model/GraphProcessor.js +7 -1
  12. package/dist/esm/model/NodeRegistration.js +0 -1
  13. package/dist/esm/model/Nodes.js +9 -0
  14. package/dist/esm/model/nodes/ChatNodeBase.js +1 -1
  15. package/dist/esm/model/nodes/CodeNode.js +1 -1
  16. package/dist/esm/model/nodes/GetAllDatasetsNode.js +1 -1
  17. package/dist/esm/model/nodes/GraphInputNode.js +2 -0
  18. package/dist/esm/model/nodes/HttpCallNode.js +2 -2
  19. package/dist/esm/model/nodes/MCPDiscoveryNode.js +239 -0
  20. package/dist/esm/model/nodes/MCPGetPromptNode.js +262 -0
  21. package/dist/esm/model/nodes/MCPToolCallNode.js +290 -0
  22. package/dist/esm/model/nodes/ObjectNode.js +42 -21
  23. package/dist/esm/model/nodes/PromptNode.js +1 -1
  24. package/dist/esm/model/nodes/SubGraphNode.js +1 -0
  25. package/dist/esm/model/nodes/TextNode.js +13 -2
  26. package/dist/esm/plugins/aidon/nodes/ChatAidonNode.js +7 -5
  27. package/dist/esm/plugins/aidon/plugin.js +15 -0
  28. package/dist/esm/plugins/anthropic/anthropic.js +22 -3
  29. package/dist/esm/plugins/anthropic/nodes/ChatAnthropicNode.js +33 -3
  30. package/dist/esm/plugins/google/google.js +29 -14
  31. package/dist/esm/plugins/google/nodes/ChatGoogleNode.js +70 -5
  32. package/dist/esm/plugins/huggingface/nodes/ChatHuggingFace.js +4 -2
  33. package/dist/esm/plugins/huggingface/nodes/TextToImageHuggingFace.js +5 -3
  34. package/dist/esm/utils/interpolation.js +155 -17
  35. package/dist/esm/utils/openai.js +24 -0
  36. package/dist/types/api/createProcessor.d.ts +3 -2
  37. package/dist/types/api/streaming.d.ts +7 -1
  38. package/dist/types/exports.d.ts +1 -0
  39. package/dist/types/integrations/CodeRunner.d.ts +4 -3
  40. package/dist/types/integrations/mcp/MCPBase.d.ts +20 -0
  41. package/dist/types/integrations/mcp/MCPProvider.d.ts +153 -0
  42. package/dist/types/integrations/mcp/MCPUtils.d.ts +9 -0
  43. package/dist/types/model/GraphProcessor.d.ts +5 -1
  44. package/dist/types/model/Nodes.d.ts +13 -2
  45. package/dist/types/model/ProcessContext.d.ts +5 -1
  46. package/dist/types/model/Project.d.ts +2 -0
  47. package/dist/types/model/nodes/GetAllDatasetsNode.d.ts +2 -2
  48. package/dist/types/model/nodes/MCPDiscoveryNode.d.ts +9 -0
  49. package/dist/types/model/nodes/MCPGetPromptNode.d.ts +23 -0
  50. package/dist/types/model/nodes/MCPToolCallNode.d.ts +26 -0
  51. package/dist/types/model/nodes/ObjectNode.d.ts +3 -2
  52. package/dist/types/model/nodes/TextNode.d.ts +2 -1
  53. package/dist/types/plugins/anthropic/anthropic.d.ts +21 -3
  54. package/dist/types/plugins/anthropic/nodes/ChatAnthropicNode.d.ts +5 -0
  55. package/dist/types/plugins/google/google.d.ts +12 -2
  56. package/dist/types/plugins/google/nodes/ChatGoogleNode.d.ts +7 -0
  57. package/dist/types/utils/interpolation.d.ts +6 -1
  58. package/dist/types/utils/openai.d.ts +24 -0
  59. package/package.json +7 -7

package/dist/esm/plugins/google/google.js
@@ -1,4 +1,4 @@
- import {} from '@google/generative-ai';
+ import {} from '@google/genai';
  export const googleModelsDeprecated = {
  'gemini-pro': {
  maxTokens: 32760,
@@ -18,12 +18,20 @@ export const googleModelsDeprecated = {
  },
  };
  export const generativeAiGoogleModels = {
- 'gemini-2.0-flash-001': {
+ 'gemini-2.5-flash-preview-04-17': {
  maxTokens: 1048576,
  cost: {
  prompt: 0.15 / 1000,
  completion: 0.6 / 1000,
  },
+ displayName: 'Gemini 2.5 Flash Preview',
+ },
+ 'gemini-2.0-flash-001': {
+ maxTokens: 1048576,
+ cost: {
+ prompt: 0.1 / 1000,
+ completion: 0.4 / 1000,
+ },
  displayName: 'Gemini 2.0 Flash',
  },
  'gemini-2.0-pro-exp-02-05': {
@@ -99,31 +107,38 @@ export const generativeAiOptions = Object.entries(generativeAiGoogleModels).map(
  value: id,
  label: displayName,
  }));
- export async function* streamGenerativeAi({ apiKey, model, systemPrompt, prompt, maxOutputTokens, temperature, topP, topK, signal, tools, }) {
- const { GoogleGenerativeAI } = await import('@google/generative-ai');
- const genAi = new GoogleGenerativeAI(apiKey);
- const genaiModel = genAi.getGenerativeModel({
+ export async function* streamGenerativeAi({ apiKey, model, systemPrompt, prompt, maxOutputTokens, temperature, topP, topK, signal, tools, thinkingBudget, additionalHeaders, }) {
+ const { GoogleGenAI } = await import('@google/genai');
+ const genAi = new GoogleGenAI({ apiKey });
+ const result = await genAi.models.generateContentStream({
  model,
- systemInstruction: systemPrompt,
- generationConfig: {
+ contents: prompt,
+ config: {
+ systemInstruction: systemPrompt,
  maxOutputTokens,
  temperature,
  topP,
  topK,
+ tools,
+ abortSignal: signal,
+ thinkingConfig: {
+ thinkingBudget,
+ },
+ httpOptions: {
+ headers: {
+ ...additionalHeaders,
+ },
+ },
  },
- tools,
  });
- const result = await genaiModel.generateContentStream({
- contents: prompt,
- }, { signal });
- for await (const chunk of result.stream) {
+ for await (const chunk of result) {
  const outChunk = {
  completion: undefined,
  finish_reason: undefined,
  function_calls: undefined,
  model,
  };
- const functionCalls = chunk.functionCalls();
+ const functionCalls = chunk.functionCalls;
  if (functionCalls) {
  outChunk.function_calls = functionCalls;
  }
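
A minimal consumer sketch for the migrated streamGenerativeAi generator above (not part of the package; the model id, prompt shape, and header values are placeholders):

    // Assumes streamGenerativeAi from google.js is in scope.
    const chunks = streamGenerativeAi({
      apiKey: process.env.GOOGLE_API_KEY,
      model: 'gemini-2.5-flash-preview-04-17',
      systemPrompt: 'You are concise.',
      prompt: [{ role: 'user', parts: [{ text: 'Hello' }] }], // shape assumed; the Chat node builds these messages
      maxOutputTokens: 1024,
      temperature: 0.5,
      topP: undefined,
      topK: undefined,
      signal: undefined,
      tools: undefined,
      thinkingBudget: undefined, // undefined leaves the thinking budget automatic, per the node's helper text
      additionalHeaders: { 'x-example-header': 'demo' }, // merged into httpOptions.headers
    });
    for await (const chunk of chunks) {
      if (chunk.completion) process.stdout.write(chunk.completion);
      if (chunk.function_calls) console.log(chunk.function_calls);
    }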

package/dist/esm/plugins/google/nodes/ChatGoogleNode.js
@@ -10,8 +10,8 @@ import { getError } from '../../../utils/errors.js';
  import { uint8ArrayToBase64 } from '../../../utils/base64.js';
  import { pluginNodeDefinition } from '../../../model/NodeDefinition.js';
  import { getScalarTypeOf, isArrayDataValue } from '../../../model/DataValue.js';
- import { getInputOrData } from '../../../utils/inputs.js';
- import { SchemaType, } from '@google/generative-ai';
+ import { getInputOrData, cleanHeaders } from '../../../utils/inputs.js';
+ import { Type } from '@google/genai';
  import { mapValues } from 'lodash-es';
  // Temporary
  const cache = new Map();
@@ -27,7 +27,7 @@ export const ChatGoogleNodeImpl = {
  width: 275,
  },
  data: {
- model: 'gemini-2.0-flash-001',
+ model: 'gemini-2.5-flash-preview-04-17',
  useModelInput: false,
  temperature: 0.5,
  useTemperatureInput: false,
@@ -42,6 +42,8 @@ export const ChatGoogleNodeImpl = {
  cache: false,
  useAsGraphPartialOutput: true,
  useToolCalling: false,
+ thinkingBudget: undefined,
+ useThinkingBudgetInput: false,
  },
  };
  return chartNode;
@@ -99,11 +101,27 @@ export const ChatGoogleNodeImpl = {
  description: 'Tools available for the model to call.',
  });
  }
+ if (data.useThinkingBudgetInput) {
+ inputs.push({
+ dataType: 'number',
+ id: 'thinkingBudget',
+ title: 'Thinking Budget',
+ description: 'The token budget for the model to think before responding.',
+ });
+ }
  inputs.push({
  dataType: ['chat-message', 'chat-message[]'],
  id: 'prompt',
  title: 'Prompt',
  });
+ if (data.useHeadersInput) {
+ inputs.push({
+ dataType: 'object',
+ id: 'headers',
+ title: 'Headers',
+ description: 'Additional headers to send to the API.',
+ });
+ }
  return inputs;
  },
  getOutputDefinitions(data) {
@@ -142,6 +160,7 @@ export const ChatGoogleNodeImpl = {
  ? `Top P: ${data.useTopPInput ? '(Using Input)' : data.top_p}`
  : `Temperature: ${data.useTemperatureInput ? '(Using Input)' : data.temperature}`}
  Max Tokens: ${data.maxTokens}
+ Thinking Budget: ${data.thinkingBudget ?? 'Automatic'}
  `;
  },
  getEditors() {
@@ -186,6 +205,17 @@ export const ChatGoogleNodeImpl = {
  max: Number.MAX_SAFE_INTEGER,
  step: 1,
  },
+ {
+ type: 'number',
+ label: 'Thinking Budget',
+ dataKey: 'thinkingBudget',
+ allowEmpty: true,
+ step: 1,
+ min: 0,
+ max: Number.MAX_SAFE_INTEGER,
+ useInputToggleDataKey: 'useThinkingBudgetInput',
+ helperMessage: 'The token budget for the model to think before responding. Leave blank for automatic budget.',
+ },
  {
  type: 'toggle',
  label: 'Enable Tool Calling',
@@ -201,6 +231,14 @@ export const ChatGoogleNodeImpl = {
  label: 'Use for subgraph partial output',
  dataKey: 'useAsGraphPartialOutput',
  },
+ {
+ type: 'keyValuePair',
+ label: 'Headers',
+ dataKey: 'headers',
+ useInputToggleDataKey: 'useHeadersInput',
+ keyPlaceholder: 'Header',
+ helperMessage: 'Additional headers to send to the API.',
+ },
  ];
  },
  getUIData() {
@@ -221,6 +259,7 @@ export const ChatGoogleNodeImpl = {
  const temperature = getInputOrData(data, inputs, 'temperature', 'number');
  const topP = getInputOrData(data, inputs, 'top_p', 'number');
  const useTopP = getInputOrData(data, inputs, 'useTopP', 'boolean');
+ const thinkingBudget = getInputOrData(data, inputs, 'thinkingBudget', 'number');
  const { messages } = getChatGoogleNodeMessages(inputs);
  let prompt = await Promise.all(messages.map(async (message) => {
  if (message.type === 'user' || message.type === 'assistant') {
@@ -326,7 +365,7 @@ export const ChatGoogleNodeImpl = {
  parameters: Object.keys(tool.parameters.properties).length === 0
  ? undefined
  : {
- type: SchemaType.OBJECT,
+ type: Type.OBJECT,
  properties: mapValues(tool.parameters.properties, (p) => ({
  // gemini doesn't support union property types, it uses openapi style not jsonschema, what a mess
  type: Array.isArray(p.type) ? p.type.filter((t) => t !== 'null')[0] : p.type,
@@ -350,6 +389,18 @@ export const ChatGoogleNodeImpl = {
  throw new Error('Google Application Credentials or Google API Key is not defined.');
  }
  }
+ const headersFromData = (data.headers ?? []).reduce((acc, header) => {
+ acc[header.key] = header.value;
+ return acc;
+ }, {});
+ const additionalHeaders = data.useHeadersInput
+ ? coerceTypeOptional(inputs['headers'], 'object') ??
+ headersFromData
+ : headersFromData;
+ const allAdditionalHeaders = cleanHeaders({
+ ...context.settings.chatNodeHeaders,
+ ...additionalHeaders,
+ });
  try {
  return await retry(async () => {
  const options = {
@@ -361,6 +412,8 @@ export const ChatGoogleNodeImpl = {
  systemPrompt,
  topK: undefined,
  tools,
+ thinkingBudget,
+ additionalHeaders: allAdditionalHeaders,
  };
  const cacheKey = JSON.stringify(options);
  if (data.cache) {
@@ -386,6 +439,8 @@ export const ChatGoogleNodeImpl = {
  apiKey,
  systemPrompt,
  tools,
+ thinkingBudget,
+ additionalHeaders: allAdditionalHeaders,
  });
  }
  else {
@@ -404,6 +459,14 @@ export const ChatGoogleNodeImpl = {
  }
  const responseParts = [];
  const functionCalls = [];
+ let throttleLastCalledTime = Date.now();
+ const onPartialOutput = (output) => {
+ const now = Date.now();
+ if (now - throttleLastCalledTime > (context.settings.throttleChatNode ?? 100)) {
+ context.onPartialOutputs?.(output);
+ throttleLastCalledTime = now;
+ }
+ };
  for await (const chunk of chunks) {
  if (chunk.completion) {
  responseParts.push(chunk.completion);
@@ -423,8 +486,10 @@ export const ChatGoogleNodeImpl = {
  })),
  };
  }
- context.onPartialOutputs?.(output);
+ onPartialOutput?.(output);
  }
+ // Call one last time manually to ensure the last output is sent
+ context.onPartialOutputs?.(output);
  const endTime = Date.now();
  output['all-messages'] = {
  type: 'chat-message[]',
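
The streaming loop above now throttles partial outputs instead of emitting every chunk. A standalone sketch of the same idea (illustrative only; the node inlines this logic rather than exporting a helper):

    // Wraps an emit function so it fires at most once per interval; the caller still
    // emits once more after the loop so the final state is never dropped.
    function makeThrottledEmitter(emit: (output: unknown) => void, intervalMs = 100) {
      let last = Date.now();
      return (output: unknown) => {
        const now = Date.now();
        if (now - last > intervalMs) {
          emit(output);
          last = now;
        }
      };
    }
    // Mirrors the node: const onPartialOutput = makeThrottledEmitter(
    //   (o) => context.onPartialOutputs?.(o), context.settings.throttleChatNode ?? 100);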

package/dist/esm/plugins/huggingface/nodes/ChatHuggingFace.js
@@ -1,6 +1,6 @@
  import { nanoid } from 'nanoid/non-secure';
  import {} from '../../../index.js';
- import { HfInference, HfInferenceEndpoint } from '@huggingface/inference';
+ import { InferenceClient } from '@huggingface/inference';
  import { getInputOrData } from '../../../utils/inputs.js';
  import { coerceType } from '../../../utils/coerceType.js';
  import { dedent } from '../../../utils/misc.js';
@@ -206,7 +206,9 @@ export const ChatHuggingFaceNodeImpl = {
  const repetitionPenalty = getInputOrData(data, inputData, 'repetitionPenalty', 'number');
  const topP = getInputOrData(data, inputData, 'topP', 'number');
  const topK = getInputOrData(data, inputData, 'topK', 'number');
- const hf = endpoint ? new HfInferenceEndpoint(endpoint, accessToken) : new HfInference(accessToken);
+ const hf = endpoint
+ ? new InferenceClient(accessToken, { endpointUrl: endpoint })
+ : new InferenceClient(accessToken);
  const generationStream = hf.textGenerationStream({
  inputs: prompt,
  model,

package/dist/esm/plugins/huggingface/nodes/TextToImageHuggingFace.js
@@ -1,6 +1,6 @@
  import { nanoid } from 'nanoid/non-secure';
  import {} from '../../../index.js';
- import { HfInference, HfInferenceEndpoint } from '@huggingface/inference';
+ import { InferenceClient } from '@huggingface/inference';
  import { dedent } from 'ts-dedent';
  import { pluginNodeDefinition } from '../../../model/NodeDefinition.js';
  import { getInputOrData } from '../../../utils/inputs.js';
@@ -163,7 +163,9 @@ export const TextToImageHuggingFaceNodeImpl = {
  const negativePrompt = getInputOrData(data, inputData, 'negativePrompt') || undefined;
  const guidanceScale = getInputOrData(data, inputData, 'guidanceScale', 'number');
  const numInferenceSteps = getInputOrData(data, inputData, 'numInferenceSteps', 'number');
- const hf = endpoint ? new HfInferenceEndpoint(endpoint, accessToken) : new HfInference(accessToken);
+ const hf = endpoint
+ ? new InferenceClient(accessToken, { endpointUrl: endpoint })
+ : new InferenceClient(accessToken);
  const image = await hf.textToImage({
  inputs: prompt,
  model,
@@ -174,7 +176,7 @@ export const TextToImageHuggingFaceNodeImpl = {
  guidance_scale: guidanceScale,
  num_inference_steps: numInferenceSteps,
  },
- });
+ }, { outputType: "blob" });
  return {
  ['output']: {
  type: 'image',
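
Both Hugging Face nodes now construct the unified InferenceClient instead of HfInference/HfInferenceEndpoint. A condensed before/after sketch (the token, endpoint, prompt, and model id are placeholders):

    import { InferenceClient } from '@huggingface/inference';

    const accessToken = process.env.HF_TOKEN;              // placeholder
    const endpoint: string | undefined = undefined;        // or a dedicated inference endpoint URL
    // Before: new HfInferenceEndpoint(endpoint, accessToken) or new HfInference(accessToken)
    const hf = endpoint
      ? new InferenceClient(accessToken, { endpointUrl: endpoint })
      : new InferenceClient(accessToken);
    // textToImage now passes { outputType: 'blob' } so the node reliably receives a Blob,
    // rather than relying on the client's default return type.
    const image = await hf.textToImage(
      { inputs: 'a watercolor fox', model: 'stabilityai/stable-diffusion-2' },
      { outputType: 'blob' },
    );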

package/dist/esm/utils/interpolation.js
@@ -1,6 +1,9 @@
  import { dedent } from './misc.js';
- export const TOKEN_MATCH_REGEX = /\{\{(?!\{)([^{}\s][^{}]*[^{}\s]|[^{}\s])\}\}(?!\})/g;
- export const ESCAPED_TOKEN_REGEX = /\{{3}([^{}]+)\}{3}/g;
+ import { get as lodashGet } from 'lodash-es';
+ // Simpler regex allowing spaces, relies on trim() later
+ export const TOKEN_MATCH_REGEX = /\{\{([^}]+?)\}\}/g;
+ export const ESCAPED_TOKEN_REGEX = /\{\{\{([^}]+?)\}\}\}/g;
+ export const ESCAPED_ESCAPED_TOKEN_REGEX = /\\\{\\\{([^}]+?)\\\}\\\}/g;
  const processingFunctions = {
  indent: (input, spaces = 0) => {
  const indent = ' '.repeat(spaces);
@@ -62,7 +65,99 @@ const processingFunctions = {
  return lines.join('\n');
  },
  };
- // Parse processing instructions like "indent 2" or "quote" into function name and parameter
+ // Helper function to check and potentially unwrap a DataValue-like object
+ export function unwrapPotentialDataValue(value) {
+ if (typeof value === 'object' &&
+ value !== null &&
+ typeof value.type === 'string' &&
+ Object.prototype.hasOwnProperty.call(value, 'value') // More robust check for 'value' property
+ ) {
+ return value.value;
+ }
+ return value;
+ }
+ // Renamed from resolveExpression, now exported and returns raw value
+ export function resolveExpressionRawValue(source, expression, sourceType) {
+ if (!source) {
+ return undefined;
+ }
+ // Regex to capture the main key and the optional path starting with . or [
+ // Allows for spaces around the key, path, ., and []
+ // Key: Group 1; Path: Group 2
+ const match = expression.trim().match(/^([^[.\s]+)\s*(.*)$/);
+ let key;
+ let path;
+ // Check if match is successful AND group 1 (the key) was captured
+ if (match && typeof match[1] === 'string') {
+ key = match[1];
+ const rawPath = match[2]; // Group 2 (the path part, might start with . or [)
+ // Clean and assign path only if rawPath is not empty
+ if (rawPath) {
+ // Clean path: Trim whitespace, then remove spaces around separators '.', '[', ']'
+ // Preserve the leading '.' or '[' as lodashGet handles them.
+ path = rawPath.trim().replace(/\s*(\.|\[|\])\s*/g, '$1');
+ }
+ else {
+ path = undefined;
+ }
+ }
+ else {
+ // If match failed or group 1 wasn't captured (fallback)
+ // Assume the entire expression is the key and there's no path
+ key = expression.trim();
+ path = undefined;
+ }
+ if (!key) {
+ // If key is empty after trimming, it's invalid.
+ return undefined;
+ }
+ const topLevelValue = source[key];
+ if (topLevelValue === undefined) {
+ return undefined; // Key not found in source
+ }
+ // Get the base value by potentially unwrapping the top-level value using the shared helper
+ const baseValue = unwrapPotentialDataValue(topLevelValue);
+ // If there's a path, try to resolve it using lodashGet on the baseValue
+ let finalValue;
+ if (path) {
+ try {
+ finalValue = lodashGet(baseValue, path);
+ // IMPORTANT: After getting a potentially nested value via path,
+ // we might *still* have a DataValue (if the context stores them nested).
+ // Unwrap again to be safe.
+ finalValue = unwrapPotentialDataValue(finalValue);
+ }
+ catch (error) {
+ console.warn(`Error accessing path "${path}" in ${sourceType} value for key "${key}":`, error);
+ return undefined; // Error during path access
+ }
+ }
+ else {
+ finalValue = baseValue; // No path, use the (potentially unwrapped) base value
+ }
+ // Return the raw final value
+ return finalValue;
+ }
+ // New function: Resolves and converts to string format suitable for TextNode
+ export function resolveExpressionToString(source, expression, sourceType) {
+ const finalValue = resolveExpressionRawValue(source, expression, sourceType);
+ if (finalValue === undefined) {
+ return undefined;
+ }
+ // Convert the final value to a string for TextNode context
+ if (typeof finalValue === 'object' && finalValue !== null) {
+ try {
+ return JSON.stringify(finalValue);
+ }
+ catch (error) {
+ console.warn(`Error stringifying object/array in ${sourceType} for expression "${expression}":`, error);
+ return '[object Object]'; // Fallback
+ }
+ }
+ // For primitives
+ return String(finalValue);
+ }
+ // Helper function to parse processing instructions like "indent 2" or "quote" into function name and parameter
  function parseProcessing(instruction) {
  const parts = instruction.trim().split(/\s+/);
  return {
@@ -83,29 +178,72 @@ function applyProcessing(value, processingChain) {
  return processingFunc(result, param);
  }, value);
  }
- export function interpolate(baseString, values) {
- return baseString
- .replace(TOKEN_MATCH_REGEX, (_m, p1) => {
- const [token, ...processing] = p1.split('|');
- const value = values[token.trim()];
- if (value === undefined)
+ export function interpolate(template, variables, graphInputValues, contextValues) {
+ return template
+ .replace(ESCAPED_TOKEN_REGEX, (_match, expression) => {
+ // Replace with \{\{expression\}\} to escape
+ return `\\{\\{${expression}\\}\\}`; // Escaped token
+ })
+ .replace(/\{\{((?:@graphInputs|@context)\..*?|[^}]+?)\}\}/g, (_match, expressionWithMaybeProcessing) => {
+ const parts = expressionWithMaybeProcessing.split('|').map((s) => s.trim());
+ const expression = parts[0]; // The variable name or path, e.g., @context.foo.bar or myVar
+ const processingChain = parts.slice(1).join('|'); // e.g., indent 2 | quote
+ let resolvedValue;
+ if (expression.startsWith('@graphInputs.')) {
+ // Use the new string-converting function
+ resolvedValue = resolveExpressionToString(graphInputValues, expression.substring('@graphInputs.'.length), 'graphInputs');
+ }
+ else if (expression.startsWith('@context.')) {
+ // Use the new string-converting function
+ resolvedValue = resolveExpressionToString(contextValues, expression.substring('@context.'.length), 'context');
+ }
+ else {
+ const simpleVar = variables[expression];
+ if (simpleVar !== undefined) {
+ // Simple variables might be DataValue or raw strings
+ resolvedValue = String(unwrapPotentialDataValue(simpleVar) ?? '');
+ }
+ else {
+ resolvedValue = undefined; // Variable not found
+ }
+ }
+ if (resolvedValue === undefined) {
+ // Return an empty string if the variable is not found or resolves to undefined
+ console.warn(`Interpolation variable or path "${expression}" not found or resolved to undefined.`);
  return '';
- if (processing.length > 0) {
- return applyProcessing(value, p1);
  }
- return value;
+ // Apply processing if any instructions exist
+ if (processingChain) {
+ return applyProcessing(resolvedValue, processingChain);
+ }
+ return resolvedValue;
  })
- .replace(ESCAPED_TOKEN_REGEX, (_m, p1) => {
- return `{{${p1}}}`;
+ .replace(ESCAPED_ESCAPED_TOKEN_REGEX, (_match, expression) => {
+ // Replace with {{expression}} to unescape
+ return `{{${expression}}}`; // Unescaped token
  });
  }
  // Extract all unique variable names from a template string
+ // Ignores variables starting with @graphInputs. or @context., as they are treated as special references.
  export function extractInterpolationVariables(template) {
- const matches = template.matchAll(TOKEN_MATCH_REGEX);
+ const matches = template
+ .replace(ESCAPED_TOKEN_REGEX, (_match, content) => {
+ // Replace escaped tokens with the escaped escaped version so they're not matched
+ return `\\{\\{${content}\\}\\}`;
+ })
+ .matchAll(TOKEN_MATCH_REGEX);
  const variables = new Set();
  for (const match of matches) {
- const [token] = match[1].split('|');
- variables.add(token.trim());
+ if (match[1]) {
+ const [tokenPart] = match[1].split('|');
+ if (tokenPart) {
+ const token = tokenPart.trim();
+ // Filter out special prefixes
+ if (!token.startsWith('@graphInputs.') && !token.startsWith('@context.')) {
+ variables.add(token);
+ }
+ }
+ }
  }
  return Array.from(variables);
  }
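
A rough usage sketch of the extended interpolate() above; all names and values here are invented:

    const out = interpolate(
      'Hi {{ name }}, first item: {{@graphInputs.items[0]}}, env: {{@context.env}}, literal: {{{keep}}}',
      { name: { type: 'string', value: 'Ada' } },                // plain variables (DataValue or raw)
      { items: { type: 'string[]', value: ['alpha', 'beta'] } }, // source for @graphInputs.* references
      { env: { type: 'string', value: 'dev' } },                 // source for @context.* references
    );
    // => 'Hi Ada, first item: alpha, env: dev, literal: {{keep}}'
    // Bracket/dot paths use lodash `get` semantics after the first key, and "| indent 2"-style
    // processing chains still run after the value has been resolved.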

package/dist/esm/utils/openai.js
@@ -188,6 +188,30 @@ export const openaiModels = {
  },
  displayName: 'GPT-4o Audio (Preview)',
  },
+ 'gpt-4.1': {
+ maxTokens: 1_047_576,
+ cost: {
+ prompt: 2e-6,
+ completion: 8e-6,
+ },
+ displayName: 'GPT-4.1',
+ },
+ o3: {
+ maxTokens: 200_000,
+ cost: {
+ prompt: 10e-6,
+ completion: 40e-6,
+ },
+ displayName: 'o3',
+ },
+ 'o4-mini': {
+ maxTokens: 200_000,
+ cost: {
+ prompt: 1.1e-6,
+ completion: 4.4e-6,
+ },
+ displayName: 'o4-mini',
+ },
  'local-model': {
  maxTokens: Number.MAX_SAFE_INTEGER,
  cost: {
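
Assuming the cost fields are USD per token (consistent with the existing openaiModels entries, 2e-6 ≈ $2 per million tokens), a quick worked estimate for a gpt-4.1 call:

    const gpt41 = { prompt: 2e-6, completion: 8e-6 }; // from the table above
    const promptTokens = 12_000;
    const completionTokens = 1_500;
    const estimatedUsd = promptTokens * gpt41.prompt + completionTokens * gpt41.completion;
    // 12_000 * 2e-6 + 1_500 * 8e-6 = 0.024 + 0.012 = 0.036 USD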

package/dist/types/api/createProcessor.d.ts
@@ -1,5 +1,5 @@
  import type { PascalCase } from 'type-fest';
- import { type AttachedData, type AudioProvider, type DataValue, type DatasetProvider, type ExternalFunction, type NativeApi, type NodeRegistration, type ProcessContext, type ProcessEvents, type Project, type RivetEventStreamFilterSpec, type Settings } from '../index.js';
+ import { type AttachedData, type AudioProvider, type DataValue, type DatasetProvider, type ExternalFunction, type MCPProvider, type NativeApi, type NodeRegistration, type ProcessContext, type ProcessEvents, type Project, type RivetEventStreamFilterSpec, type Settings } from '../index.js';
  import { GraphProcessor } from '../model/GraphProcessor.js';
  import type { Tokenizer } from '../integrations/Tokenizer.js';
  import { type LooseDataValue } from './looseDataValue.js';
@@ -12,6 +12,7 @@ export type RunGraphOptions = {
  nativeApi?: NativeApi;
  datasetProvider?: DatasetProvider;
  audioProvider?: AudioProvider;
+ mcpProvider?: MCPProvider;
  externalFunctions?: {
  [key: string]: ExternalFunction;
  };
@@ -19,7 +20,7 @@ export type RunGraphOptions = {
  [key: string]: (data: DataValue | undefined) => void;
  };
  abortSignal?: AbortSignal;
- registry?: NodeRegistration;
+ registry?: NodeRegistration<any, any>;
  includeTrace?: boolean;
  getChatNodeEndpoint?: ProcessContext['getChatNodeEndpoint'];
  tokenizer?: Tokenizer;

package/dist/types/api/streaming.d.ts
@@ -1,4 +1,4 @@
- import { type NodeId, type Inputs, type Outputs, type GraphOutputs, type GraphProcessor } from '../index.js';
+ import { type NodeId, type Inputs, type Outputs, type GraphOutputs, type GraphProcessor, type DataValue, type RunGraphOptions, type GraphEvents } from '../index.js';
  export type RivetEventStreamFilterSpec = {
  /** Stream partial output deltas for the specified node IDs or node titles. */
  partialOutputs?: string[] | true;
@@ -16,6 +16,8 @@ export type RivetEventStreamFilterSpec = {
  nodeStart?: string[] | true;
  /** Stream node finish events for the specified nodeIDs or node titles. */
  nodeFinish?: string[] | true;
+ /** Optional list of user events (comma separated list) to stream as part of the output. */
+ userStreamEvents?: string | undefined;
  };
  /** Map of all possible event names to their data for streaming events. */
  export type RivetEventStreamEvent = {
@@ -38,6 +40,9 @@ export type RivetEventStreamEvent = {
  done: {
  graphOutput: GraphOutputs;
  };
+ event: {
+ graphEvent: GraphEvents;
+ };
  error: {
  error: string;
  };
@@ -49,6 +54,7 @@ export type RivetEventStreamEventInfo = {
  }[keyof RivetEventStreamEvent];
  /** A simplified way to listen and stream processor events, including filtering. */
  export declare function getProcessorEvents(processor: GraphProcessor, spec: RivetEventStreamFilterSpec): AsyncGenerator<RivetEventStreamEventInfo, void>;
+ export declare const createOnStreamUserEvents: (eventList: string | undefined, handleUserEvent: (event: string, data: DataValue | undefined) => Promise<void>) => RunGraphOptions["onUserEvent"];
  /**
  * Creates a ReadableStream for processor events, following the Server-Sent Events protocol.
  * https://developer.mozilla.org/en-US/docs/Web/API/EventSource
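
A hedged sketch of how the new streaming pieces could be wired together; only the declarations above come from this diff, the event names and surrounding wiring are assumed:

    const spec: RivetEventStreamFilterSpec = {
      partialOutputs: true,
      nodeFinish: true,
      userStreamEvents: 'progress,stage-complete', // comma-separated user event names (invented here)
    };
    // Builds a RunGraphOptions['onUserEvent'] handler that forwards only the listed events.
    const onUserEvent = createOnStreamUserEvents(spec.userStreamEvents, async (event, data) => {
      console.log('user event for the stream:', event, data);
    });
    // onUserEvent would then be passed along with the other RunGraphOptions when running the graph,
    // and matching events would presumably surface as the new `event` entries in the stream.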

package/dist/types/exports.d.ts
@@ -14,6 +14,7 @@ export * from './model/ProcessContext.js';
  export * from './integrations/integrations.js';
  import './integrations/enableIntegrations.js';
  export * from './integrations/VectorDatabase.js';
+ export * from './integrations/mcp/MCPProvider.js';
  export * from './integrations/EmbeddingGenerator.js';
  export * from './integrations/LLMProvider.js';
  export * from './recording/ExecutionRecorder.js';

package/dist/types/integrations/CodeRunner.d.ts
@@ -1,4 +1,5 @@
  import type { Inputs, Outputs } from '../index.js';
+ import type { DataValue } from '../model/DataValue.js';
  export interface CodeRunnerOptions {
  includeRequire: boolean;
  includeFetch: boolean;
@@ -8,11 +9,11 @@ export interface CodeRunnerOptions {
  }
  /** An object that can run arbitrary code (evals it). */
  export interface CodeRunner {
- runCode: (code: string, inputs: Inputs, options: CodeRunnerOptions) => Promise<Outputs>;
+ runCode: (code: string, inputs: Inputs, options: CodeRunnerOptions, graphInputs?: Record<string, DataValue>, contextValues?: Record<string, DataValue>) => Promise<Outputs>;
  }
  export declare class IsomorphicCodeRunner implements CodeRunner {
- runCode(code: string, inputs: Inputs, options: CodeRunnerOptions): Promise<Outputs>;
+ runCode(code: string, inputs: Inputs, options: CodeRunnerOptions, graphInputs?: Record<string, DataValue>, contextValues?: Record<string, DataValue>): Promise<Outputs>;
  }
  export declare class NotAllowedCodeRunner implements CodeRunner {
- runCode(_code: string, _inputs: Inputs, _options: CodeRunnerOptions): Promise<Outputs>;
+ runCode(_code: string, _inputs: Inputs, _options: CodeRunnerOptions, _graphInputs?: Record<string, DataValue>, _contextValues?: Record<string, DataValue>): Promise<Outputs>;
  }

package/dist/types/integrations/mcp/MCPBase.d.ts (new file)
@@ -0,0 +1,20 @@
+ import type { EditorDefinition } from '../../model/EditorDefinition.js';
+ import type { ChartNode, NodeInputDefinition } from '../../model/NodeBase.js';
+ import type { RivetUIContext } from '../../model/RivetUIContext.js';
+ import type { MCP } from './MCPProvider.js';
+ export interface MCPBaseNodeData {
+ name: string;
+ version: string;
+ transportType: MCP.TransportType;
+ serverUrl?: string;
+ serverId?: string;
+ headers: string;
+ useHeadersInput?: boolean;
+ useNameInput?: boolean;
+ useVersionInput?: boolean;
+ useServerUrlInput?: boolean;
+ useServerIdInput?: boolean;
+ }
+ export type MCPBaseNode = ChartNode<'mcpBase', MCPBaseNodeData>;
+ export declare const getMCPBaseInputs: (data: MCPBaseNodeData) => NodeInputDefinition[];
+ export declare const getMCPBaseEditors: (context: RivetUIContext, data: MCPBaseNodeData) => Promise<EditorDefinition<MCPBaseNode>[]>;
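
Illustrative only: a data object shaped like the new MCPBaseNodeData. The transportType literal and the headers encoding are assumptions; MCP.TransportType's actual members are declared in MCPProvider.d.ts, which is not shown in this excerpt:

    const data: MCPBaseNodeData = {
      name: 'my-mcp-client',
      version: '1.0.0',
      transportType: 'http' as MCP.TransportType, // placeholder literal
      serverUrl: 'https://example.com/mcp',
      headers: '{"Authorization": "Bearer <token>"}', // typed as a string in the interface
      useHeadersInput: false,
    };
    const inputs = getMCPBaseInputs(data); // NodeInputDefinition[] shared by the MCP nodes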