modelfusion 0.49.0 → 0.51.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. package/README.md +176 -158
  2. package/core/FunctionEvent.d.ts +9 -1
  3. package/core/FunctionOptions.d.ts +4 -0
  4. package/core/executeFunctionCall.cjs +85 -0
  5. package/core/executeFunctionCall.d.ts +10 -0
  6. package/core/executeFunctionCall.js +81 -0
  7. package/guard/GuardEvent.cjs +2 -0
  8. package/guard/GuardEvent.d.ts +7 -0
  9. package/guard/GuardEvent.js +1 -0
  10. package/guard/guard.cjs +60 -54
  11. package/guard/guard.d.ts +2 -1
  12. package/guard/guard.js +60 -54
  13. package/guard/index.cjs +1 -0
  14. package/guard/index.d.ts +1 -0
  15. package/guard/index.js +1 -0
  16. package/model-function/embed/EmbeddingEvent.d.ts +2 -2
  17. package/model-function/embed/embed.cjs +2 -2
  18. package/model-function/embed/embed.js +2 -2
  19. package/model-function/executeStandardCall.cjs +2 -0
  20. package/model-function/executeStandardCall.js +2 -0
  21. package/model-function/executeStreamCall.cjs +2 -0
  22. package/model-function/executeStreamCall.js +2 -0
  23. package/model-function/generate-image/ImageGenerationEvent.d.ts +2 -2
  24. package/model-function/generate-image/generateImage.cjs +1 -1
  25. package/model-function/generate-image/generateImage.js +1 -1
  26. package/model-function/generate-speech/SpeechGenerationEvent.d.ts +4 -4
  27. package/model-function/generate-speech/generateSpeech.cjs +1 -1
  28. package/model-function/generate-speech/generateSpeech.js +1 -1
  29. package/model-function/generate-speech/streamSpeech.cjs +1 -1
  30. package/model-function/generate-speech/streamSpeech.js +1 -1
  31. package/model-function/generate-structure/StructureGenerationEvent.d.ts +2 -2
  32. package/model-function/generate-structure/StructureStreamingEvent.d.ts +2 -2
  33. package/model-function/generate-structure/generateStructure.cjs +1 -1
  34. package/model-function/generate-structure/generateStructure.js +1 -1
  35. package/model-function/generate-structure/generateStructureOrText.cjs +1 -1
  36. package/model-function/generate-structure/generateStructureOrText.js +1 -1
  37. package/model-function/generate-structure/streamStructure.cjs +1 -1
  38. package/model-function/generate-structure/streamStructure.js +1 -1
  39. package/model-function/generate-text/TextGenerationEvent.d.ts +4 -4
  40. package/model-function/generate-text/generateText.cjs +1 -1
  41. package/model-function/generate-text/generateText.js +1 -1
  42. package/model-function/generate-text/streamText.cjs +1 -1
  43. package/model-function/generate-text/streamText.js +1 -1
  44. package/model-function/generate-transcription/TranscriptionEvent.d.ts +2 -2
  45. package/model-function/generate-transcription/generateTranscription.cjs +1 -1
  46. package/model-function/generate-transcription/generateTranscription.js +1 -1
  47. package/model-provider/elevenlabs/ElevenLabsError.cjs +0 -1
  48. package/model-provider/elevenlabs/ElevenLabsError.js +0 -1
  49. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +33 -5
  50. package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +6 -1
  51. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +33 -5
  52. package/model-provider/lmnt/LmntError.cjs +0 -1
  53. package/model-provider/lmnt/LmntError.js +0 -1
  54. package/model-provider/openai/OpenAICompletionModel.d.ts +4 -4
  55. package/model-provider/openai/OpenAICostCalculator.cjs +5 -5
  56. package/model-provider/openai/OpenAICostCalculator.js +5 -5
  57. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +3 -3
  58. package/model-provider/openai/chat/OpenAIChatModel.d.ts +7 -7
  59. package/package.json +1 -1
  60. package/retriever/retrieve.cjs +7 -75
  61. package/retriever/retrieve.js +7 -75
  62. package/tool/UseToolEvent.cjs +2 -0
  63. package/tool/UseToolEvent.d.ts +7 -0
  64. package/tool/UseToolEvent.js +1 -0
  65. package/tool/UseToolOrGenerateTextEvent.cjs +2 -0
  66. package/tool/UseToolOrGenerateTextEvent.d.ts +7 -0
  67. package/tool/UseToolOrGenerateTextEvent.js +1 -0
  68. package/tool/executeTool.cjs +2 -0
  69. package/tool/executeTool.js +2 -0
  70. package/tool/index.cjs +2 -0
  71. package/tool/index.d.ts +2 -0
  72. package/tool/index.js +2 -0
  73. package/tool/useTool.cjs +18 -10
  74. package/tool/useTool.js +18 -10
  75. package/tool/useToolOrGenerateText.cjs +34 -26
  76. package/tool/useToolOrGenerateText.js +34 -26
  77. package/vector-index/UpsertIntoVectorIndexEvent.cjs +2 -0
  78. package/vector-index/UpsertIntoVectorIndexEvent.d.ts +9 -0
  79. package/vector-index/UpsertIntoVectorIndexEvent.js +1 -0
  80. package/vector-index/VectorIndexRetriever.cjs +1 -4
  81. package/vector-index/VectorIndexRetriever.js +1 -4
  82. package/vector-index/index.cjs +1 -0
  83. package/vector-index/index.d.ts +1 -0
  84. package/vector-index/index.js +1 -0
  85. package/vector-index/upsertIntoVectorIndex.cjs +16 -7
  86. package/vector-index/upsertIntoVectorIndex.js +16 -7
package/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # ModelFusion
2
2
 
3
- > ### Build multi-modal AI apps, chatbots, and agents with JavaScript and TypeScript.
3
+ > ### The TypeScript library for building multi-modal AI applications.
4
4
 
5
5
  [![NPM Version](https://img.shields.io/npm/v/modelfusion?color=33cd56&logo=npm)](https://www.npmjs.com/package/modelfusion)
6
6
  [![MIT License](https://img.shields.io/github/license/lgrammel/modelfusion)](https://opensource.org/licenses/MIT)
@@ -10,21 +10,22 @@
10
10
 
11
11
  [Introduction](#introduction) | [Quick Install](#quick-install) | [Usage](#usage-examples) | [Documentation](#documentation) | [Examples](#more-examples) | [Contributing](#contributing) | [modelfusion.dev](https://modelfusion.dev)
12
12
 
13
- > [!NOTE]
14
- > ModelFusion is in its initial development phase. Until version 1.0 there may be breaking changes, because I am still exploring the API design. Feedback and suggestions are welcome.
15
-
16
13
  ## Introduction
17
14
 
18
- ModelFusion is a library for building AI apps, chatbots, and agents. It provides abstractions for AI models, vector indices, and tools.
15
+ **ModelFusion** is a TypeScript library for building AI applications, chatbots, and agents.
19
16
 
20
- - **Multimodal Support**: Beyond just LLMs, ModelFusion encompasses a diverse array of models including text generation, text-to-speech, speech-to-text, and image generation, allowing you to build multi-modal AI applications with ease.
21
- - **Flexibility and control**: AI application development can be complex and unique to each project. With ModelFusion, you have complete control over the prompts and model settings, and you can access the raw responses from the models quickly to build what you need.
22
- - **Type inference and validation**: ModelFusion uses TypeScript to infer types wherever possible and to validate model responses. By default, [Zod](https://github.com/colinhacks/zod) is used for type validation, but you can also use other libraries.
23
- - **Guards**: ModelFusion provides a guard function that you can use to implement retry on error, redacting and changing reponses, etc.
24
- - **Integrated support features**: Essential features like **observability**, logging, retries, throttling, tracing, and error handling are built-in, helping you focus more on building your application.
17
+ - **Multimodal**: ModelFusion supports a wide range of models including text generation, image generation, text-to-speech, speech-to-text, and embedding models.
18
+ - **Streaming**: ModelFusion supports streaming for many generation models, e.g. text streaming, structure streaming, and full duplex speech streaming.
19
+ - **Utility functions**: ModelFusion provides functionality for tools and tool usage, vector indices, and guards functions.
20
+ - **Type inference and validation**: ModelFusion infers TypeScript types wherever possible and to validates model responses.
21
+ - **Observability and logging**: ModelFusion provides an observer framework and out-of-the-box logging support.
22
+ - **Resilience and Robustness**: ModelFusion ensures seamless operation through automatic retries, throttling, and error handling mechanisms.
25
23
 
26
24
  ## Quick Install
27
25
 
26
+ > [!NOTE]
27
+ > ModelFusion is in its initial development phase. The main API is now mostly stable, but until version 1.0 there may be minor breaking changes. Feedback and suggestions are welcome.
28
+
28
29
  ```sh
29
30
  npm install modelfusion
30
31
  ```
@@ -71,6 +72,84 @@ for await (const textFragment of textStream) {
71
72
 
72
73
  Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
73
74
 
75
+ ### [Generate Image](https://modelfusion.dev/guide/function/generate-image)
76
+
77
+ Generate an image from a prompt.
78
+
79
+ ```ts
80
+ const image = await generateImage(
81
+ new OpenAIImageGenerationModel({ size: "512x512" }),
82
+ "the wicked witch of the west in the style of early 19th century painting"
83
+ );
84
+ ```
85
+
86
+ Providers: [OpenAI (Dall·E)](https://modelfusion.dev/integration/model-provider/openai), [Stability AI](https://modelfusion.dev/integration/model-provider/stability), [Automatic1111](https://modelfusion.dev/integration/model-provider/automatic1111)
87
+
88
+ ### [Generate Speech](https://modelfusion.dev/guide/function/generate-speech)
89
+
90
+ Synthesize speech (audio) from text. Also called TTS (text-to-speech).
91
+
92
+ #### generateSpeech
93
+
94
+ `generateSpeech` synthesizes speech from text.
95
+
96
+ ```ts
97
+ // `speech` is a Buffer with MP3 audio data
98
+ const speech = await generateSpeech(
99
+ new LmntSpeechModel({
100
+ voice: "034b632b-df71-46c8-b440-86a42ffc3cf3", // Henry
101
+ }),
102
+ "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
103
+ "as The Rolling Stones unveil 'Hackney Diamonds,' their first collection of " +
104
+ "fresh tunes in nearly twenty years, featuring the illustrious Lady Gaga, the " +
105
+ "magical Stevie Wonder, and the final beats from the late Charlie Watts."
106
+ );
107
+ ```
108
+
109
+ Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elevenlabs), [LMNT](https://modelfusion.dev/integration/model-provider/lmnt)
110
+
111
+ #### streamSpeech
112
+
113
+ `generateSpeech` generates a stream of speech chunks from text or from a text stream. Depending on the model, this can be fully duplex.
114
+
115
+ ```ts
116
+ const textStream = await streamText(/* ... */);
117
+
118
+ const speechStream = await streamSpeech(
119
+ new ElevenLabsSpeechModel({
120
+ voice: "pNInz6obpgDQGcFmaJgB", // Adam
121
+ optimizeStreamingLatency: 1,
122
+ voiceSettings: { stability: 1, similarityBoost: 0.35 },
123
+ generationConfig: {
124
+ chunkLengthSchedule: [50, 90, 120, 150, 200],
125
+ },
126
+ }),
127
+ textStream
128
+ );
129
+
130
+ for await (const part of speechStream) {
131
+ // each part is a Buffer with MP3 audio data
132
+ }
133
+ ```
134
+
135
+ Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elevenlabs)
136
+
137
+ ### [Generate Transcription](https://modelfusion.dev/guide/function/generate-transcription)
138
+
139
+ Transcribe speech (audio) data into text. Also called speech-to-text (STT).
140
+
141
+ ```ts
142
+ const transcription = await generateTranscription(
143
+ new OpenAITranscriptionModel({ model: "whisper-1" }),
144
+ {
145
+ type: "mp3",
146
+ data: await fs.promises.readFile("data/test.mp3"),
147
+ }
148
+ );
149
+ ```
150
+
151
+ Providers: [OpenAI (Whisper)](https://modelfusion.dev/integration/model-provider/openai)
152
+
74
153
  ### [Generate Structure](https://modelfusion.dev/guide/function/generate-structure#generatestructure)
75
154
 
76
155
  Generate typed objects using a language model and a schema.
@@ -189,149 +268,6 @@ const { structure, value, text } = await generateStructureOrText(
189
268
 
190
269
  Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai)
191
270
 
192
- ### [Tools](https://modelfusion.dev/guide/tools)
193
-
194
- Tools are functions that can be executed by an AI model. They are useful for building chatbots and agents.
195
-
196
- Predefined tools: [SerpAPI](https://modelfusion.dev/integration/tool/serpapi), [Google Custom Search](https://modelfusion.dev/integration/tool/google-custom-search)
197
-
198
- #### Create Tool
199
-
200
- A tool is a function with a name, a description, and a schema for the input parameters.
201
-
202
- ```ts
203
- const calculator = new Tool({
204
- name: "calculator",
205
- description: "Execute a calculation",
206
-
207
- inputSchema: new ZodSchema(
208
- z.object({
209
- a: z.number().describe("The first number."),
210
- b: z.number().describe("The second number."),
211
- operator: z
212
- .enum(["+", "-", "*", "/"])
213
- .describe("The operator (+, -, *, /)."),
214
- })
215
- ),
216
-
217
- execute: async ({ a, b, operator }) => {
218
- switch (operator) {
219
- case "+":
220
- return a + b;
221
- case "-":
222
- return a - b;
223
- case "*":
224
- return a * b;
225
- case "/":
226
- return a / b;
227
- default:
228
- throw new Error(`Unknown operator: ${operator}`);
229
- }
230
- },
231
- });
232
- ```
233
-
234
- #### useTool
235
-
236
- The model determines the parameters for the tool from the prompt and then executes it.
237
-
238
- ```ts
239
- const { tool, parameters, result } = await useTool(
240
- new OpenAIChatModel({ model: "gpt-3.5-turbo" }),
241
- calculator,
242
- [OpenAIChatMessage.user("What's fourteen times twelve?")]
243
- );
244
- ```
245
-
246
- #### useToolOrGenerateText
247
-
248
- The model determines which tool to use and its parameters from the prompt and then executes it.
249
- Text is generated as a fallback.
250
-
251
- ```ts
252
- const { tool, parameters, result, text } = await useToolOrGenerateText(
253
- new OpenAIChatModel({ model: "gpt-3.5-turbo" }),
254
- [calculator /* and other tools... */],
255
- [OpenAIChatMessage.user("What's fourteen times twelve?")]
256
- );
257
- ```
258
-
259
- ### [Generate Transcription](https://modelfusion.dev/guide/function/generate-transcription)
260
-
261
- Transcribe speech (audio) data into text. Also called speech-to-text (STT).
262
-
263
- ```ts
264
- const transcription = await generateTranscription(
265
- new OpenAITranscriptionModel({ model: "whisper-1" }),
266
- {
267
- type: "mp3",
268
- data: await fs.promises.readFile("data/test.mp3"),
269
- }
270
- );
271
- ```
272
-
273
- Providers: [OpenAI (Whisper)](https://modelfusion.dev/integration/model-provider/openai)
274
-
275
- ### [Generate Speech](https://modelfusion.dev/guide/function/generate-speech)
276
-
277
- Synthesize speech (audio) from text. Also called TTS (text-to-speech).
278
-
279
- Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elevenlabs), [LMNT](https://modelfusion.dev/integration/model-provider/lmnt)
280
-
281
- #### generateSpeech
282
-
283
- `generateSpeech` synthesizes speech from text.
284
-
285
- ```ts
286
- // `speech` is a Buffer with MP3 audio data
287
- const speech = await generateSpeech(
288
- new LmntSpeechModel({
289
- voice: "034b632b-df71-46c8-b440-86a42ffc3cf3", // Henry
290
- }),
291
- "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
292
- "as The Rolling Stones unveil 'Hackney Diamonds,' their first collection of " +
293
- "fresh tunes in nearly twenty years, featuring the illustrious Lady Gaga, the " +
294
- "magical Stevie Wonder, and the final beats from the late Charlie Watts."
295
- );
296
- ```
297
-
298
- #### streamSpeech
299
-
300
- `generateSpeech` generates a stream of speech chunks from text or from a text stream. Depending on the model, this can be fully duplex.
301
-
302
- ```ts
303
- const textStream = await streamText(/* ... */);
304
-
305
- const speechStream = await streamSpeech(
306
- new ElevenLabsSpeechModel({
307
- voice: "pNInz6obpgDQGcFmaJgB", // Adam
308
- model: "eleven_monolingual_v1",
309
- voiceSettings: { stability: 1, similarityBoost: 0.35 },
310
- generationConfig: {
311
- chunkLengthSchedule: [50, 90, 120, 150, 200],
312
- },
313
- }),
314
- textStream
315
- );
316
-
317
- for await (const part of speechStream) {
318
- // each part is a Buffer with MP3 audio data
319
- }
320
- ```
321
-
322
- ### [Generate Image](https://modelfusion.dev/guide/function/generate-image)
323
-
324
- Generate an image from a prompt.
325
-
326
- ```ts
327
- const image = await generateImage(
328
- new OpenAIImageGenerationModel({ size: "512x512" }),
329
- "the wicked witch of the west in the style of early 19th century painting"
330
- );
331
- ```
332
-
333
- Providers: [OpenAI (Dall·E)](https://modelfusion.dev/integration/model-provider/openai), [Stability AI](https://modelfusion.dev/integration/model-provider/stability), [Automatic1111](https://modelfusion.dev/integration/model-provider/automatic1111)
334
-
335
271
  ### [Embed Value](https://modelfusion.dev/guide/function/embed)
336
272
 
337
273
  Create embeddings for text and other values. Embeddings are vectors that represent the essence of the values in the context of the model.
@@ -381,7 +317,7 @@ Guard functions can be used to implement retry on error, redacting and changing
381
317
 
382
318
  ```ts
383
319
  const result = await guard(
384
- (input) =>
320
+ (input, options) =>
385
321
  generateStructure(
386
322
  new OpenAIChatModel({
387
323
  // ...
@@ -389,7 +325,8 @@ const result = await guard(
389
325
  new ZodStructureDefinition({
390
326
  // ...
391
327
  }),
392
- input
328
+ input,
329
+ options
393
330
  ),
394
331
  [
395
332
  // ...
@@ -408,7 +345,74 @@ const result = await guard(
408
345
  );
409
346
  ```
410
347
 
411
- ### [Upserting and Retrieving Objects from Vector Indices](https://modelfusion.dev/guide/vector-index)
348
+ ### [Tools](https://modelfusion.dev/guide/tools)
349
+
350
+ Tools are functions that can be executed by an AI model. They are useful for building chatbots and agents.
351
+
352
+ Predefined tools: [SerpAPI](https://modelfusion.dev/integration/tool/serpapi), [Google Custom Search](https://modelfusion.dev/integration/tool/google-custom-search)
353
+
354
+ #### Create Tool
355
+
356
+ A tool is a function with a name, a description, and a schema for the input parameters.
357
+
358
+ ```ts
359
+ const calculator = new Tool({
360
+ name: "calculator",
361
+ description: "Execute a calculation",
362
+
363
+ inputSchema: new ZodSchema(
364
+ z.object({
365
+ a: z.number().describe("The first number."),
366
+ b: z.number().describe("The second number."),
367
+ operator: z
368
+ .enum(["+", "-", "*", "/"])
369
+ .describe("The operator (+, -, *, /)."),
370
+ })
371
+ ),
372
+
373
+ execute: async ({ a, b, operator }) => {
374
+ switch (operator) {
375
+ case "+":
376
+ return a + b;
377
+ case "-":
378
+ return a - b;
379
+ case "*":
380
+ return a * b;
381
+ case "/":
382
+ return a / b;
383
+ default:
384
+ throw new Error(`Unknown operator: ${operator}`);
385
+ }
386
+ },
387
+ });
388
+ ```
389
+
390
+ #### useTool
391
+
392
+ The model determines the parameters for the tool from the prompt and then executes it.
393
+
394
+ ```ts
395
+ const { tool, parameters, result } = await useTool(
396
+ new OpenAIChatModel({ model: "gpt-3.5-turbo" }),
397
+ calculator,
398
+ [OpenAIChatMessage.user("What's fourteen times twelve?")]
399
+ );
400
+ ```
401
+
402
+ #### useToolOrGenerateText
403
+
404
+ The model determines which tool to use and its parameters from the prompt and then executes it.
405
+ Text is generated as a fallback.
406
+
407
+ ```ts
408
+ const { tool, parameters, result, text } = await useToolOrGenerateText(
409
+ new OpenAIChatModel({ model: "gpt-3.5-turbo" }),
410
+ [calculator /* and other tools... */],
411
+ [OpenAIChatMessage.user("What's fourteen times twelve?")]
412
+ );
413
+ ```
414
+
415
+ ### [Vector Indices](https://modelfusion.dev/guide/vector-index)
412
416
 
413
417
  ```ts
414
418
  const texts = [
@@ -529,9 +533,15 @@ for (const choice of (response as OpenAICompletionResponse).choices) {
529
533
  }
530
534
  ```
531
535
 
532
- ### Observability
536
+ ### Logging and Observability
537
+
538
+ ModelFusion provides an [observer framework](https://modelfusion.dev/guide/util/observer) and [out-of-the-box logging support](https://modelfusion.dev/guide/util/logging). You can easily trace runs and call hierarchies, and you can add your own observers.
533
539
 
534
- Integrations: [Helicone](https://modelfusion.dev/integration/observability/helicone)
540
+ #### Global Logging Example
541
+
542
+ ```ts
543
+ setGlobalFunctionLogging("detailed-object"); // log full events
544
+ ```
535
545
 
536
546
  ## Documentation
537
547
 
@@ -654,3 +664,11 @@ Generate text on a Cloudflare Worker using ModelFusion and OpenAI.
654
664
  ### [Contributing Guide](https://github.com/lgrammel/modelfusion/blob/main/CONTRIBUTING.md)
655
665
 
656
666
  Read the [ModelFusion contributing guide](https://github.com/lgrammel/modelfusion/blob/main/CONTRIBUTING.md) to learn about the development process, how to propose bugfixes and improvements, and how to build and test your changes.
667
+
668
+ ```
669
+
670
+ ```
671
+
672
+ ```
673
+
674
+ ```
@@ -1,11 +1,19 @@
1
+ import { GuardFinishedEvent, GuardStartedEvent } from "../guard/GuardEvent.js";
1
2
  import { ModelCallFinishedEvent, ModelCallStartedEvent } from "../model-function/ModelCallEvent.js";
2
3
  import { RetrieveFinishedEvent, RetrieveStartedEvent } from "../retriever/RetrieveEvent.js";
3
4
  import { ExecuteToolFinishedEvent, ExecuteToolStartedEvent } from "../tool/ExecuteToolEvent.js";
5
+ import { UseToolFinishedEvent, UseToolStartedEvent } from "../tool/UseToolEvent.js";
6
+ import { UseToolOrGenerateTextFinishedEvent, UseToolOrGenerateTextStartedEvent } from "../tool/UseToolOrGenerateTextEvent.js";
7
+ import { UpsertIntoVectorIndexFinishedEvent, UpsertIntoVectorIndexStartedEvent } from "../vector-index/UpsertIntoVectorIndexEvent.js";
4
8
  export interface BaseFunctionEvent {
5
9
  /**
6
10
  * Unique identifier for the function call.
7
11
  */
8
12
  callId: string | undefined;
13
+ /**
14
+ * Unique identifier of the call id of the parent function.
15
+ */
16
+ parentCallId?: string | undefined;
9
17
  /**
10
18
  * Optional unique identifier for the function.
11
19
  */
@@ -73,4 +81,4 @@ export interface BaseFunctionFinishedEvent extends BaseFunctionEvent {
73
81
  */
74
82
  result: BaseFunctionFinishedEventResult;
75
83
  }
76
- export type FunctionEvent = ModelCallStartedEvent | ExecuteToolStartedEvent | RetrieveStartedEvent | ModelCallFinishedEvent | ExecuteToolFinishedEvent | RetrieveFinishedEvent;
84
+ export type FunctionEvent = ExecuteToolStartedEvent | ExecuteToolFinishedEvent | GuardStartedEvent | GuardFinishedEvent | ModelCallStartedEvent | ModelCallFinishedEvent | RetrieveStartedEvent | RetrieveFinishedEvent | UpsertIntoVectorIndexStartedEvent | UpsertIntoVectorIndexFinishedEvent | UseToolStartedEvent | UseToolFinishedEvent | UseToolOrGenerateTextStartedEvent | UseToolOrGenerateTextFinishedEvent;
@@ -21,6 +21,10 @@ export type FunctionOptions = {
21
21
  * Optional run as part of which this function is called.
22
22
  */
23
23
  run?: Run;
24
+ /**
25
+ * Unique identifier of the call id of the parent function.
26
+ */
27
+ parentCallId?: string | undefined;
24
28
  };
25
29
  /**
26
30
  * The logging to use for the function. Logs are sent to the console.
@@ -0,0 +1,85 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.executeFunctionCall = void 0;
4
+ const nanoid_1 = require("nanoid");
5
+ const FunctionEventSource_js_1 = require("./FunctionEventSource.cjs");
6
+ const GlobalFunctionLogging_js_1 = require("./GlobalFunctionLogging.cjs");
7
+ const GlobalFunctionObservers_js_1 = require("./GlobalFunctionObservers.cjs");
8
+ const AbortError_js_1 = require("./api/AbortError.cjs");
9
+ const getFunctionCallLogger_js_1 = require("./getFunctionCallLogger.cjs");
10
+ const getRun_js_1 = require("./getRun.cjs");
11
+ const DurationMeasurement_js_1 = require("../util/DurationMeasurement.cjs");
12
+ const runSafe_js_1 = require("../util/runSafe.cjs");
13
+ async function executeFunctionCall({ options, input, functionType, execute, inputPropertyName = "input", outputPropertyName = "value", }) {
14
+ const run = await (0, getRun_js_1.getRun)(options?.run);
15
+ const eventSource = new FunctionEventSource_js_1.FunctionEventSource({
16
+ observers: [
17
+ ...(0, getFunctionCallLogger_js_1.getFunctionCallLogger)(options?.logging ?? (0, GlobalFunctionLogging_js_1.getGlobalFunctionLogging)()),
18
+ ...(0, GlobalFunctionObservers_js_1.getGlobalFunctionObservers)(),
19
+ ...(run?.functionObserver != null ? [run.functionObserver] : []),
20
+ ...(options?.observers ?? []),
21
+ ],
22
+ errorHandler: run?.errorHandler,
23
+ });
24
+ const durationMeasurement = (0, DurationMeasurement_js_1.startDurationMeasurement)();
25
+ const startMetadata = {
26
+ functionType,
27
+ callId: `call-${(0, nanoid_1.nanoid)()}`,
28
+ parentCallId: options?.parentCallId,
29
+ runId: run?.runId,
30
+ sessionId: run?.sessionId,
31
+ userId: run?.userId,
32
+ functionId: options?.functionId,
33
+ [inputPropertyName]: input,
34
+ timestamp: durationMeasurement.startDate,
35
+ startTimestamp: durationMeasurement.startDate,
36
+ };
37
+ eventSource.notify({
38
+ eventType: "started",
39
+ ...startMetadata,
40
+ });
41
+ const result = await (0, runSafe_js_1.runSafe)(() => execute({
42
+ functionId: options?.functionId,
43
+ logging: options?.logging,
44
+ observers: options?.observers,
45
+ run,
46
+ parentCallId: startMetadata.callId,
47
+ }));
48
+ const finishMetadata = {
49
+ eventType: "finished",
50
+ ...startMetadata,
51
+ finishTimestamp: new Date(),
52
+ durationInMs: durationMeasurement.durationInMs,
53
+ };
54
+ if (!result.ok) {
55
+ if (result.isAborted) {
56
+ eventSource.notify({
57
+ ...finishMetadata,
58
+ eventType: "finished",
59
+ result: {
60
+ status: "abort",
61
+ },
62
+ });
63
+ throw new AbortError_js_1.AbortError();
64
+ }
65
+ eventSource.notify({
66
+ ...finishMetadata,
67
+ eventType: "finished",
68
+ result: {
69
+ status: "error",
70
+ error: result.error,
71
+ },
72
+ });
73
+ throw result.error;
74
+ }
75
+ eventSource.notify({
76
+ ...finishMetadata,
77
+ eventType: "finished",
78
+ result: {
79
+ status: "success",
80
+ [outputPropertyName]: result.value,
81
+ },
82
+ });
83
+ return result.value;
84
+ }
85
+ exports.executeFunctionCall = executeFunctionCall;
@@ -0,0 +1,10 @@
1
+ import { FunctionOptions } from "./FunctionOptions.js";
2
+ import { FunctionEvent } from "./FunctionEvent.js";
3
+ export declare function executeFunctionCall<VALUE>({ options, input, functionType, execute, inputPropertyName, outputPropertyName, }: {
4
+ options?: FunctionOptions;
5
+ input: unknown;
6
+ functionType: FunctionEvent["functionType"];
7
+ execute: (options?: FunctionOptions) => PromiseLike<VALUE>;
8
+ inputPropertyName?: string;
9
+ outputPropertyName?: string;
10
+ }): Promise<VALUE>;
@@ -0,0 +1,81 @@
1
+ import { nanoid as createId } from "nanoid";
2
+ import { FunctionEventSource } from "./FunctionEventSource.js";
3
+ import { getGlobalFunctionLogging } from "./GlobalFunctionLogging.js";
4
+ import { getGlobalFunctionObservers } from "./GlobalFunctionObservers.js";
5
+ import { AbortError } from "./api/AbortError.js";
6
+ import { getFunctionCallLogger } from "./getFunctionCallLogger.js";
7
+ import { getRun } from "./getRun.js";
8
+ import { startDurationMeasurement } from "../util/DurationMeasurement.js";
9
+ import { runSafe } from "../util/runSafe.js";
10
+ export async function executeFunctionCall({ options, input, functionType, execute, inputPropertyName = "input", outputPropertyName = "value", }) {
11
+ const run = await getRun(options?.run);
12
+ const eventSource = new FunctionEventSource({
13
+ observers: [
14
+ ...getFunctionCallLogger(options?.logging ?? getGlobalFunctionLogging()),
15
+ ...getGlobalFunctionObservers(),
16
+ ...(run?.functionObserver != null ? [run.functionObserver] : []),
17
+ ...(options?.observers ?? []),
18
+ ],
19
+ errorHandler: run?.errorHandler,
20
+ });
21
+ const durationMeasurement = startDurationMeasurement();
22
+ const startMetadata = {
23
+ functionType,
24
+ callId: `call-${createId()}`,
25
+ parentCallId: options?.parentCallId,
26
+ runId: run?.runId,
27
+ sessionId: run?.sessionId,
28
+ userId: run?.userId,
29
+ functionId: options?.functionId,
30
+ [inputPropertyName]: input,
31
+ timestamp: durationMeasurement.startDate,
32
+ startTimestamp: durationMeasurement.startDate,
33
+ };
34
+ eventSource.notify({
35
+ eventType: "started",
36
+ ...startMetadata,
37
+ });
38
+ const result = await runSafe(() => execute({
39
+ functionId: options?.functionId,
40
+ logging: options?.logging,
41
+ observers: options?.observers,
42
+ run,
43
+ parentCallId: startMetadata.callId,
44
+ }));
45
+ const finishMetadata = {
46
+ eventType: "finished",
47
+ ...startMetadata,
48
+ finishTimestamp: new Date(),
49
+ durationInMs: durationMeasurement.durationInMs,
50
+ };
51
+ if (!result.ok) {
52
+ if (result.isAborted) {
53
+ eventSource.notify({
54
+ ...finishMetadata,
55
+ eventType: "finished",
56
+ result: {
57
+ status: "abort",
58
+ },
59
+ });
60
+ throw new AbortError();
61
+ }
62
+ eventSource.notify({
63
+ ...finishMetadata,
64
+ eventType: "finished",
65
+ result: {
66
+ status: "error",
67
+ error: result.error,
68
+ },
69
+ });
70
+ throw result.error;
71
+ }
72
+ eventSource.notify({
73
+ ...finishMetadata,
74
+ eventType: "finished",
75
+ result: {
76
+ status: "success",
77
+ [outputPropertyName]: result.value,
78
+ },
79
+ });
80
+ return result.value;
81
+ }
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1,7 @@
1
+ import { BaseFunctionFinishedEvent, BaseFunctionStartedEvent } from "../core/FunctionEvent.js";
2
+ export interface GuardStartedEvent extends BaseFunctionStartedEvent {
3
+ functionType: "guard";
4
+ }
5
+ export interface GuardFinishedEvent extends BaseFunctionFinishedEvent {
6
+ functionType: "guard";
7
+ }
@@ -0,0 +1 @@
1
+ export {};