modelfusion 0.49.0 → 0.51.0

This diff compares the contents of publicly released versions of the package as they appear in the public registry. It is provided for informational purposes only.
Files changed (86)
  1. package/README.md +176 -158
  2. package/core/FunctionEvent.d.ts +9 -1
  3. package/core/FunctionOptions.d.ts +4 -0
  4. package/core/executeFunctionCall.cjs +85 -0
  5. package/core/executeFunctionCall.d.ts +10 -0
  6. package/core/executeFunctionCall.js +81 -0
  7. package/guard/GuardEvent.cjs +2 -0
  8. package/guard/GuardEvent.d.ts +7 -0
  9. package/guard/GuardEvent.js +1 -0
  10. package/guard/guard.cjs +60 -54
  11. package/guard/guard.d.ts +2 -1
  12. package/guard/guard.js +60 -54
  13. package/guard/index.cjs +1 -0
  14. package/guard/index.d.ts +1 -0
  15. package/guard/index.js +1 -0
  16. package/model-function/embed/EmbeddingEvent.d.ts +2 -2
  17. package/model-function/embed/embed.cjs +2 -2
  18. package/model-function/embed/embed.js +2 -2
  19. package/model-function/executeStandardCall.cjs +2 -0
  20. package/model-function/executeStandardCall.js +2 -0
  21. package/model-function/executeStreamCall.cjs +2 -0
  22. package/model-function/executeStreamCall.js +2 -0
  23. package/model-function/generate-image/ImageGenerationEvent.d.ts +2 -2
  24. package/model-function/generate-image/generateImage.cjs +1 -1
  25. package/model-function/generate-image/generateImage.js +1 -1
  26. package/model-function/generate-speech/SpeechGenerationEvent.d.ts +4 -4
  27. package/model-function/generate-speech/generateSpeech.cjs +1 -1
  28. package/model-function/generate-speech/generateSpeech.js +1 -1
  29. package/model-function/generate-speech/streamSpeech.cjs +1 -1
  30. package/model-function/generate-speech/streamSpeech.js +1 -1
  31. package/model-function/generate-structure/StructureGenerationEvent.d.ts +2 -2
  32. package/model-function/generate-structure/StructureStreamingEvent.d.ts +2 -2
  33. package/model-function/generate-structure/generateStructure.cjs +1 -1
  34. package/model-function/generate-structure/generateStructure.js +1 -1
  35. package/model-function/generate-structure/generateStructureOrText.cjs +1 -1
  36. package/model-function/generate-structure/generateStructureOrText.js +1 -1
  37. package/model-function/generate-structure/streamStructure.cjs +1 -1
  38. package/model-function/generate-structure/streamStructure.js +1 -1
  39. package/model-function/generate-text/TextGenerationEvent.d.ts +4 -4
  40. package/model-function/generate-text/generateText.cjs +1 -1
  41. package/model-function/generate-text/generateText.js +1 -1
  42. package/model-function/generate-text/streamText.cjs +1 -1
  43. package/model-function/generate-text/streamText.js +1 -1
  44. package/model-function/generate-transcription/TranscriptionEvent.d.ts +2 -2
  45. package/model-function/generate-transcription/generateTranscription.cjs +1 -1
  46. package/model-function/generate-transcription/generateTranscription.js +1 -1
  47. package/model-provider/elevenlabs/ElevenLabsError.cjs +0 -1
  48. package/model-provider/elevenlabs/ElevenLabsError.js +0 -1
  49. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +33 -5
  50. package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +6 -1
  51. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +33 -5
  52. package/model-provider/lmnt/LmntError.cjs +0 -1
  53. package/model-provider/lmnt/LmntError.js +0 -1
  54. package/model-provider/openai/OpenAICompletionModel.d.ts +4 -4
  55. package/model-provider/openai/OpenAICostCalculator.cjs +5 -5
  56. package/model-provider/openai/OpenAICostCalculator.js +5 -5
  57. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +3 -3
  58. package/model-provider/openai/chat/OpenAIChatModel.d.ts +7 -7
  59. package/package.json +1 -1
  60. package/retriever/retrieve.cjs +7 -75
  61. package/retriever/retrieve.js +7 -75
  62. package/tool/UseToolEvent.cjs +2 -0
  63. package/tool/UseToolEvent.d.ts +7 -0
  64. package/tool/UseToolEvent.js +1 -0
  65. package/tool/UseToolOrGenerateTextEvent.cjs +2 -0
  66. package/tool/UseToolOrGenerateTextEvent.d.ts +7 -0
  67. package/tool/UseToolOrGenerateTextEvent.js +1 -0
  68. package/tool/executeTool.cjs +2 -0
  69. package/tool/executeTool.js +2 -0
  70. package/tool/index.cjs +2 -0
  71. package/tool/index.d.ts +2 -0
  72. package/tool/index.js +2 -0
  73. package/tool/useTool.cjs +18 -10
  74. package/tool/useTool.js +18 -10
  75. package/tool/useToolOrGenerateText.cjs +34 -26
  76. package/tool/useToolOrGenerateText.js +34 -26
  77. package/vector-index/UpsertIntoVectorIndexEvent.cjs +2 -0
  78. package/vector-index/UpsertIntoVectorIndexEvent.d.ts +9 -0
  79. package/vector-index/UpsertIntoVectorIndexEvent.js +1 -0
  80. package/vector-index/VectorIndexRetriever.cjs +1 -4
  81. package/vector-index/VectorIndexRetriever.js +1 -4
  82. package/vector-index/index.cjs +1 -0
  83. package/vector-index/index.d.ts +1 -0
  84. package/vector-index/index.js +1 -0
  85. package/vector-index/upsertIntoVectorIndex.cjs +16 -7
  86. package/vector-index/upsertIntoVectorIndex.js +16 -7
package/model-function/generate-text/streamText.cjs CHANGED
@@ -7,7 +7,7 @@ function streamText(model, prompt, options) {
     let accumulatedText = "";
     let lastFullDelta;
     return new AsyncIterableResultPromise_js_1.AsyncIterableResultPromise((0, executeStreamCall_js_1.executeStreamCall)({
-        functionType: "text-streaming",
+        functionType: "stream-text",
         input: prompt,
         model,
         options,
package/model-function/generate-text/streamText.js CHANGED
@@ -4,7 +4,7 @@ export function streamText(model, prompt, options) {
     let accumulatedText = "";
     let lastFullDelta;
     return new AsyncIterableResultPromise(executeStreamCall({
-        functionType: "text-streaming",
+        functionType: "stream-text",
         input: prompt,
         model,
         options,
package/model-function/generate-transcription/TranscriptionEvent.d.ts CHANGED
@@ -1,6 +1,6 @@
 import { BaseModelCallFinishedEvent, BaseModelCallStartedEvent } from "../ModelCallEvent.js";
 export interface TranscriptionStartedEvent extends BaseModelCallStartedEvent {
-    functionType: "transcription";
+    functionType: "generate-transcription";
 }
 export type TranscriptionFinishedEventResult = {
     status: "success";
@@ -13,6 +13,6 @@ export type TranscriptionFinishedEventResult = {
     status: "abort";
 };
 export interface TranscriptionFinishedEvent extends BaseModelCallFinishedEvent {
-    functionType: "transcription";
+    functionType: "generate-transcription";
     result: TranscriptionFinishedEventResult;
 }
package/model-function/generate-transcription/generateTranscription.cjs CHANGED
@@ -19,7 +19,7 @@ const ModelFunctionPromise_js_1 = require("../ModelFunctionPromise.cjs");
  */
 function generateTranscription(model, data, options) {
     return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeStandardCall_js_1.executeStandardCall)({
-        functionType: "transcription",
+        functionType: "generate-transcription",
         input: data,
         model,
         options,
package/model-function/generate-transcription/generateTranscription.js CHANGED
@@ -16,7 +16,7 @@ import { ModelFunctionPromise } from "../ModelFunctionPromise.js";
  */
 export function generateTranscription(model, data, options) {
     return new ModelFunctionPromise(executeStandardCall({
-        functionType: "transcription",
+        functionType: "generate-transcription",
         input: data,
         model,
         options,
package/model-provider/elevenlabs/ElevenLabsError.cjs CHANGED
@@ -5,7 +5,6 @@ const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
 const failedElevenLabsCallResponseHandler = async ({ response, url, requestBodyValues }) => {
     const responseBody = await response.text();
     try {
-        // TODO implement ElevenLabsError
         return new ApiCallError_js_1.ApiCallError({
             message: responseBody,
             statusCode: response.status,
package/model-provider/elevenlabs/ElevenLabsError.js CHANGED
@@ -2,7 +2,6 @@ import { ApiCallError } from "../../core/api/ApiCallError.js";
 export const failedElevenLabsCallResponseHandler = async ({ response, url, requestBodyValues }) => {
     const responseBody = await response.text();
     try {
-        // TODO implement ElevenLabsError
         return new ApiCallError({
             message: responseBody,
             statusCode: response.status,
package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs CHANGED
@@ -15,11 +15,14 @@ const elevenLabsModels = [
     "eleven_multilingual_v1",
     "eleven_monolingual_v1",
 ];
-const defaultModel = "eleven_multilingual_v2";
+const defaultModel = "eleven_monolingual_v1";
 /**
  * Synthesize speech using the ElevenLabs Text to Speech API.
  *
- * @see https://api.elevenlabs.io/docs#/text-to-speech/Text_to_speech_v1_text_to_speech__voice_id__post
+ * Both regular text-to-speech and full duplex text-to-speech streaming are supported.
+ *
+ * @see https://docs.elevenlabs.io/api-reference/text-to-speech
+ * @see https://docs.elevenlabs.io/api-reference/text-to-speech-websockets
  */
 class ElevenLabsSpeechModel extends AbstractModel_js_1.AbstractModel {
     constructor(settings) {
@@ -84,7 +87,11 @@ class ElevenLabsSpeechModel extends AbstractModel_js_1.AbstractModel {
         ]);
         const queue = new AsyncQueue_js_1.AsyncQueue();
         const model = this.settings.model ?? defaultModel;
-        const socket = await (0, SimpleWebSocket_js_1.createSimpleWebSocket)(`wss://api.elevenlabs.io/v1/text-to-speech/${this.settings.voice}/stream-input?model_id=${model}`);
+        const socket = await (0, SimpleWebSocket_js_1.createSimpleWebSocket)(`wss://api.elevenlabs.io/v1/text-to-speech/${this.settings.voice}/stream-input${assembleQuery({
+            model_id: model,
+            optimize_streaming_latency: this.settings.optimizeStreamingLatency,
+            output_format: this.settings.outputFormat,
+        })}`);
         socket.onopen = async () => {
             const api = this.settings.api ?? new ElevenLabsApiConfiguration_js_1.ElevenLabsApiConfiguration();
             // send begin-of-stream (BOS) message:
@@ -158,9 +165,12 @@
     }
 }
 exports.ElevenLabsSpeechModel = ElevenLabsSpeechModel;
-async function callElevenLabsTextToSpeechAPI({ api = new ElevenLabsApiConfiguration_js_1.ElevenLabsApiConfiguration(), abortSignal, text, voiceId, modelId, voiceSettings, }) {
+async function callElevenLabsTextToSpeechAPI({ api = new ElevenLabsApiConfiguration_js_1.ElevenLabsApiConfiguration(), abortSignal, text, voiceId, modelId, optimizeStreamingLatency, outputFormat, voiceSettings, }) {
     return (0, postToApi_js_1.postJsonToApi)({
-        url: api.assembleUrl(`/text-to-speech/${voiceId}`),
+        url: api.assembleUrl(`/text-to-speech/${voiceId}${assembleQuery({
+            optimize_streaming_latency: optimizeStreamingLatency,
+            output_format: outputFormat,
+        })}`),
         headers: api.headers,
         body: {
             text,
@@ -172,6 +182,24 @@ async function callElevenLabsTextToSpeechAPI({ api = new ElevenLabsApiConfigurat
         abortSignal,
     });
 }
+function assembleQuery(parameters) {
+    let query = "";
+    let hasQuestionMark = false;
+    for (const [key, value] of Object.entries(parameters)) {
+        if (value == null) {
+            continue;
+        }
+        if (!hasQuestionMark) {
+            query += "?";
+            hasQuestionMark = true;
+        }
+        else {
+            query += "&";
+        }
+        query += `${key}=${value}`;
+    }
+    return query;
+}
 function toApiVoiceSettings(voiceSettings) {
     return voiceSettings != null
         ? {
package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts CHANGED
@@ -11,6 +11,8 @@ export interface ElevenLabsSpeechModelSettings extends SpeechGenerationModelSett
     };
     voice: string;
    model?: (typeof elevenLabsModels)[number] | (string & {});
+    optimizeStreamingLatency?: 0 | 1 | 2 | 3 | 4;
+    outputFormat?: "mp3_44100" | "pcm_16000" | "pcm_22050" | "pcm_24000" | "pcm_44100";
     voiceSettings?: {
         stability: number;
         similarityBoost: number;
@@ -24,7 +26,10 @@ export interface ElevenLabsSpeechModelSettings extends SpeechGenerationModelSett
 /**
  * Synthesize speech using the ElevenLabs Text to Speech API.
  *
- * @see https://api.elevenlabs.io/docs#/text-to-speech/Text_to_speech_v1_text_to_speech__voice_id__post
+ * Both regular text-to-speech and full duplex text-to-speech streaming are supported.
+ *
+ * @see https://docs.elevenlabs.io/api-reference/text-to-speech
+ * @see https://docs.elevenlabs.io/api-reference/text-to-speech-websockets
  */
 export declare class ElevenLabsSpeechModel extends AbstractModel<ElevenLabsSpeechModelSettings> implements StreamingSpeechGenerationModel<ElevenLabsSpeechModelSettings> {
     constructor(settings: ElevenLabsSpeechModelSettings);
package/model-provider/elevenlabs/ElevenLabsSpeechModel.js CHANGED
@@ -12,11 +12,14 @@ const elevenLabsModels = [
     "eleven_multilingual_v1",
     "eleven_monolingual_v1",
 ];
-const defaultModel = "eleven_multilingual_v2";
+const defaultModel = "eleven_monolingual_v1";
 /**
  * Synthesize speech using the ElevenLabs Text to Speech API.
  *
- * @see https://api.elevenlabs.io/docs#/text-to-speech/Text_to_speech_v1_text_to_speech__voice_id__post
+ * Both regular text-to-speech and full duplex text-to-speech streaming are supported.
+ *
+ * @see https://docs.elevenlabs.io/api-reference/text-to-speech
+ * @see https://docs.elevenlabs.io/api-reference/text-to-speech-websockets
  */
 export class ElevenLabsSpeechModel extends AbstractModel {
     constructor(settings) {
@@ -81,7 +84,11 @@ export class ElevenLabsSpeechModel extends AbstractModel {
         ]);
         const queue = new AsyncQueue();
         const model = this.settings.model ?? defaultModel;
-        const socket = await createSimpleWebSocket(`wss://api.elevenlabs.io/v1/text-to-speech/${this.settings.voice}/stream-input?model_id=${model}`);
+        const socket = await createSimpleWebSocket(`wss://api.elevenlabs.io/v1/text-to-speech/${this.settings.voice}/stream-input${assembleQuery({
+            model_id: model,
+            optimize_streaming_latency: this.settings.optimizeStreamingLatency,
+            output_format: this.settings.outputFormat,
+        })}`);
         socket.onopen = async () => {
             const api = this.settings.api ?? new ElevenLabsApiConfiguration();
             // send begin-of-stream (BOS) message:
@@ -154,9 +161,12 @@
         });
     }
 }
-async function callElevenLabsTextToSpeechAPI({ api = new ElevenLabsApiConfiguration(), abortSignal, text, voiceId, modelId, voiceSettings, }) {
+async function callElevenLabsTextToSpeechAPI({ api = new ElevenLabsApiConfiguration(), abortSignal, text, voiceId, modelId, optimizeStreamingLatency, outputFormat, voiceSettings, }) {
     return postJsonToApi({
-        url: api.assembleUrl(`/text-to-speech/${voiceId}`),
+        url: api.assembleUrl(`/text-to-speech/${voiceId}${assembleQuery({
+            optimize_streaming_latency: optimizeStreamingLatency,
+            output_format: outputFormat,
+        })}`),
         headers: api.headers,
         body: {
             text,
@@ -168,6 +178,24 @@ async function callElevenLabsTextToSpeechAPI({ api = new ElevenLabsApiConfigurat
         abortSignal,
     });
 }
+function assembleQuery(parameters) {
+    let query = "";
+    let hasQuestionMark = false;
+    for (const [key, value] of Object.entries(parameters)) {
+        if (value == null) {
+            continue;
+        }
+        if (!hasQuestionMark) {
+            query += "?";
+            hasQuestionMark = true;
+        }
+        else {
+            query += "&";
+        }
+        query += `${key}=${value}`;
+    }
+    return query;
+}
 function toApiVoiceSettings(voiceSettings) {
     return voiceSettings != null
         ? {
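The new `optimizeStreamingLatency` and `outputFormat` settings are appended to both the REST URL and the WebSocket stream-input URL through the added `assembleQuery` helper, which skips null and undefined values, so the query string stays unchanged when neither setting is provided. A minimal usage sketch, assuming the `generateSpeech(model, text)` call shape of this release; the voice ID and text are placeholders, not taken from this diff:

```ts
import { ElevenLabsSpeechModel, generateSpeech } from "modelfusion";

// Hypothetical example: voice ID and text are placeholders.
const speech = await generateSpeech(
  new ElevenLabsSpeechModel({
    voice: "pNInz6obpgDQGcFmaJgB",
    model: "eleven_monolingual_v1", // now also the default model
    optimizeStreamingLatency: 1, // 0-4; sent as optimize_streaming_latency=1
    outputFormat: "mp3_44100", // sent as output_format=mp3_44100
  }),
  "Hello, world!"
);

// For these settings, assembleQuery({ optimize_streaming_latency: 1, output_format: "mp3_44100" })
// returns "?optimize_streaming_latency=1&output_format=mp3_44100", which is appended to
// /text-to-speech/{voiceId}; the WebSocket URL additionally carries model_id.
```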
package/model-provider/lmnt/LmntError.cjs CHANGED
@@ -5,7 +5,6 @@ const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
 const failedLmntCallResponseHandler = async ({ response, url, requestBodyValues }) => {
     const responseBody = await response.text();
     try {
-        // TODO implement LmntError
         return new ApiCallError_js_1.ApiCallError({
             message: responseBody,
             statusCode: response.status,
package/model-provider/lmnt/LmntError.js CHANGED
@@ -2,7 +2,6 @@ import { ApiCallError } from "../../core/api/ApiCallError.js";
 export const failedLmntCallResponseHandler = async ({ response, url, requestBodyValues }) => {
     const responseBody = await response.text();
     try {
-        // TODO implement LmntError
         return new ApiCallError({
             message: responseBody,
             statusCode: response.status,
package/model-provider/openai/OpenAICompletionModel.d.ts CHANGED
@@ -148,12 +148,12 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
     doGenerateText(prompt: string, options?: FunctionOptions): Promise<{
         response: {
             object: "text_completion";
-            model: string;
             usage: {
                 prompt_tokens: number;
                 completion_tokens: number;
                 total_tokens: number;
             };
+            model: string;
             id: string;
             created: number;
             choices: {
@@ -221,12 +221,12 @@ declare const OpenAICompletionResponseSchema: z.ZodObject<{
     }>;
 }, "strip", z.ZodTypeAny, {
     object: "text_completion";
-    model: string;
     usage: {
         prompt_tokens: number;
         completion_tokens: number;
         total_tokens: number;
     };
+    model: string;
     id: string;
     created: number;
     choices: {
@@ -237,12 +237,12 @@ declare const OpenAICompletionResponseSchema: z.ZodObject<{
     }[];
 }, {
     object: "text_completion";
-    model: string;
     usage: {
         prompt_tokens: number;
         completion_tokens: number;
         total_tokens: number;
     };
+    model: string;
     id: string;
     created: number;
     choices: {
@@ -265,12 +265,12 @@ export declare const OpenAITextResponseFormat: {
     stream: false;
     handler: ResponseHandler<{
         object: "text_completion";
-        model: string;
         usage: {
             prompt_tokens: number;
            completion_tokens: number;
            total_tokens: number;
         };
+        model: string;
         id: string;
         created: number;
         choices: {
package/model-provider/openai/OpenAICostCalculator.cjs CHANGED
@@ -19,12 +19,12 @@ class OpenAICostCalculator {
         const type = call.functionType;
         const model = call.model.modelName;
         switch (type) {
-            case "image-generation": {
+            case "generate-image": {
                 return (0, OpenAIImageGenerationModel_js_1.calculateOpenAIImageGenerationCostInMillicents)({
                     settings: call.settings,
                 });
             }
-            case "embedding": {
+            case "embed": {
                 if (model == null) {
                     return null;
                 }
@@ -39,8 +39,8 @@
                 }
                 break;
             }
-            case "structure-generation":
-            case "text-generation": {
+            case "generate-structure":
+            case "generate-text": {
                 if (model == null) {
                     return null;
                 }
@@ -58,7 +58,7 @@
                 }
                 break;
             }
-            case "transcription": {
+            case "generate-transcription": {
                 if (model == null) {
                     return null;
                 }
package/model-provider/openai/OpenAICostCalculator.js CHANGED
@@ -16,12 +16,12 @@ export class OpenAICostCalculator {
         const type = call.functionType;
         const model = call.model.modelName;
         switch (type) {
-            case "image-generation": {
+            case "generate-image": {
                 return calculateOpenAIImageGenerationCostInMillicents({
                     settings: call.settings,
                 });
             }
-            case "embedding": {
+            case "embed": {
                 if (model == null) {
                     return null;
                 }
@@ -36,8 +36,8 @@
                 }
                 break;
             }
-            case "structure-generation":
-            case "text-generation": {
+            case "generate-structure":
+            case "generate-text": {
                 if (model == null) {
                     return null;
                 }
@@ -55,7 +55,7 @@
                 }
                 break;
             }
-            case "transcription": {
+            case "generate-transcription": {
                 if (model == null) {
                     return null;
                 }
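The cost calculator hunks track the `functionType` renames made throughout this release: `image-generation` → `generate-image`, `embedding` → `embed`, `structure-generation` → `generate-structure`, `text-generation` → `generate-text`, `transcription` → `generate-transcription`, and, per the streamText hunks above, `text-streaming` → `stream-text`. Custom observers or cost calculators that match on these strings need the same update. A sketch of an observer filtering on the new names, assuming the `FunctionObserver` and `FunctionEvent` types exported by modelfusion:

```ts
import { FunctionEvent, FunctionObserver } from "modelfusion";

// Sketch only: logs model calls using the renamed functionType values.
const functionTypeLogger: FunctionObserver = {
  onFunctionEvent(event: FunctionEvent) {
    switch (event.functionType) {
      case "generate-image": // was "image-generation"
      case "embed": // was "embedding"
      case "generate-structure": // was "structure-generation"
      case "generate-text": // was "text-generation"
      case "generate-transcription": // was "transcription"
      case "stream-text": // was "text-streaming"
        console.log(`${event.functionType} (${event.eventType})`);
        break;
    }
  },
};
```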
package/model-provider/openai/OpenAITextEmbeddingModel.d.ts CHANGED
@@ -50,11 +50,11 @@ export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEm
     doEmbedValues(texts: string[], options?: FunctionOptions): Promise<{
         response: {
             object: "list";
-            model: string;
             usage: {
                 prompt_tokens: number;
                 total_tokens: number;
             };
+            model: string;
             data: {
                 object: "embedding";
                 embedding: number[];
@@ -93,11 +93,11 @@ declare const openAITextEmbeddingResponseSchema: z.ZodObject<{
     }>;
 }, "strip", z.ZodTypeAny, {
     object: "list";
-    model: string;
     usage: {
         prompt_tokens: number;
         total_tokens: number;
     };
+    model: string;
     data: {
         object: "embedding";
         embedding: number[];
@@ -105,11 +105,11 @@ declare const openAITextEmbeddingResponseSchema: z.ZodObject<{
     }[];
 }, {
     object: "list";
-    model: string;
     usage: {
         prompt_tokens: number;
         total_tokens: number;
     };
+    model: string;
     data: {
         object: "embedding";
         embedding: number[];
package/model-provider/openai/chat/OpenAIChatModel.d.ts CHANGED
@@ -158,12 +158,12 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
     doGenerateText(prompt: OpenAIChatMessage[], options?: FunctionOptions): Promise<{
         response: {
             object: "chat.completion";
-            model: string;
             usage: {
                 prompt_tokens: number;
                 completion_tokens: number;
                 total_tokens: number;
             };
+            model: string;
             id: string;
             created: number;
             choices: {
@@ -198,12 +198,12 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
     doGenerateStructure(structureDefinition: StructureDefinition<string, unknown>, prompt: OpenAIChatMessage[], options?: FunctionOptions): Promise<{
         response: {
             object: "chat.completion";
-            model: string;
             usage: {
                 prompt_tokens: number;
                 completion_tokens: number;
                 total_tokens: number;
             };
+            model: string;
             id: string;
             created: number;
             choices: {
@@ -232,12 +232,12 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
     doGenerateStructureOrText(structureDefinitions: Array<StructureDefinition<string, unknown>>, prompt: OpenAIChatMessage[], options?: FunctionOptions): Promise<{
         response: {
             object: "chat.completion";
-            model: string;
             usage: {
                 prompt_tokens: number;
                 completion_tokens: number;
                 total_tokens: number;
             };
+            model: string;
             id: string;
             created: number;
             choices: {
@@ -268,12 +268,12 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
     } | {
         response: {
             object: "chat.completion";
-            model: string;
             usage: {
                 prompt_tokens: number;
                 completion_tokens: number;
                 total_tokens: number;
             };
+            model: string;
             id: string;
             created: number;
             choices: {
@@ -395,12 +395,12 @@ declare const openAIChatResponseSchema: z.ZodObject<{
     }>;
 }, "strip", z.ZodTypeAny, {
     object: "chat.completion";
-    model: string;
     usage: {
         prompt_tokens: number;
         completion_tokens: number;
         total_tokens: number;
     };
+    model: string;
     id: string;
     created: number;
     choices: {
@@ -418,12 +418,12 @@ declare const openAIChatResponseSchema: z.ZodObject<{
     }[];
 }, {
     object: "chat.completion";
-    model: string;
     usage: {
         prompt_tokens: number;
         completion_tokens: number;
         total_tokens: number;
     };
+    model: string;
     id: string;
     created: number;
     choices: {
@@ -453,12 +453,12 @@ export declare const OpenAIChatResponseFormat: {
     stream: false;
     handler: ResponseHandler<{
         object: "chat.completion";
-        model: string;
         usage: {
             prompt_tokens: number;
             completion_tokens: number;
             total_tokens: number;
         };
+        model: string;
         id: string;
         created: number;
         choices: {
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
-  "version": "0.49.0",
+  "version": "0.51.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [
package/retriever/retrieve.cjs CHANGED
@@ -1,83 +1,15 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.retrieve = void 0;
-const nanoid_1 = require("nanoid");
-const FunctionEventSource_js_1 = require("../core/FunctionEventSource.cjs");
-const GlobalFunctionLogging_js_1 = require("../core/GlobalFunctionLogging.cjs");
-const GlobalFunctionObservers_js_1 = require("../core/GlobalFunctionObservers.cjs");
-const AbortError_js_1 = require("../core/api/AbortError.cjs");
-const getFunctionCallLogger_js_1 = require("../core/getFunctionCallLogger.cjs");
-const getRun_js_1 = require("../core/getRun.cjs");
-const DurationMeasurement_js_1 = require("../util/DurationMeasurement.cjs");
-const runSafe_js_1 = require("../util/runSafe.cjs");
+const executeFunctionCall_js_1 = require("../core/executeFunctionCall.cjs");
 async function retrieve(retriever, query, options) {
-    const run = await (0, getRun_js_1.getRun)(options?.run);
-    const eventSource = new FunctionEventSource_js_1.FunctionEventSource({
-        observers: [
-            ...(0, getFunctionCallLogger_js_1.getFunctionCallLogger)(options?.logging ?? (0, GlobalFunctionLogging_js_1.getGlobalFunctionLogging)()),
-            ...(0, GlobalFunctionObservers_js_1.getGlobalFunctionObservers)(),
-            ...(run?.functionObserver != null ? [run.functionObserver] : []),
-            ...(options?.observers ?? []),
-        ],
-        errorHandler: run?.errorHandler,
-    });
-    const durationMeasurement = (0, DurationMeasurement_js_1.startDurationMeasurement)();
-    const startMetadata = {
+    return (0, executeFunctionCall_js_1.executeFunctionCall)({
+        options,
+        input: query,
         functionType: "retrieve",
-        callId: `call-${(0, nanoid_1.nanoid)()}`,
-        runId: run?.runId,
-        sessionId: run?.sessionId,
-        userId: run?.userId,
-        functionId: options?.functionId,
-        query,
-        timestamp: durationMeasurement.startDate,
-        startTimestamp: durationMeasurement.startDate,
-    };
-    eventSource.notify({
-        eventType: "started",
-        ...startMetadata,
-    });
-    const result = await (0, runSafe_js_1.runSafe)(() => retriever.retrieve(query, {
-        functionId: options?.functionId,
-        logging: options?.logging,
-        observers: options?.observers,
-        run,
-    }));
-    const finishMetadata = {
-        eventType: "finished",
-        ...startMetadata,
-        finishTimestamp: new Date(),
-        durationInMs: durationMeasurement.durationInMs,
-    };
-    if (!result.ok) {
-        if (result.isAborted) {
-            eventSource.notify({
-                ...finishMetadata,
-                eventType: "finished",
-                result: {
-                    status: "abort",
-                },
-            });
-            throw new AbortError_js_1.AbortError();
-        }
-        eventSource.notify({
-            ...finishMetadata,
-            eventType: "finished",
-            result: {
-                status: "error",
-                error: result.error,
-            },
-        });
-        throw result.error;
-    }
-    eventSource.notify({
-        ...finishMetadata,
-        eventType: "finished",
-        result: {
-            status: "success",
-            value: result.value,
-        },
+        execute: (options) => retriever.retrieve(query, options),
+        inputPropertyName: "query",
+        outputPropertyName: "results",
     });
-    return result.value;
 }
 exports.retrieve = retrieve;
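The per-call boilerplate (run resolution, observer wiring, started/finished events, abort and error handling) now lives in the new `core/executeFunctionCall` helper; the files-changed list suggests the reworked guard, useTool, useToolOrGenerateText, and upsertIntoVectorIndex pick up the same helper. Its declaration (`package/core/executeFunctionCall.d.ts`) is not shown in this excerpt, so the following parameter shape is only inferred from the call site above; treat the names, generics, and optional markers as assumptions:

```ts
import { FunctionOptions } from "modelfusion";

// Inferred sketch of the call-site contract of core/executeFunctionCall.
// The real declaration is in package/core/executeFunctionCall.d.ts (not shown here).
type ExecuteFunctionCallArgs<VALUE> = {
  options?: FunctionOptions;
  input: unknown;
  functionType: string; // e.g. "retrieve"; the new GuardEvent/UseToolEvent types use their own values
  execute: (options?: FunctionOptions) => PromiseLike<VALUE>;
  inputPropertyName?: string; // how the input is labeled in emitted events ("query" here)
  outputPropertyName?: string; // how the result is labeled in emitted events ("results" here)
};
```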