modelfusion 0.121.2 → 0.122.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/CHANGELOG.md +41 -1
  2. package/README.md +86 -84
  3. package/classifier/SemanticClassifier.cjs +8 -2
  4. package/classifier/SemanticClassifier.js +8 -2
  5. package/model-function/ModelCallEvent.d.ts +3 -0
  6. package/model-function/embed/embed.cjs +14 -14
  7. package/model-function/embed/embed.d.ts +24 -18
  8. package/model-function/embed/embed.js +14 -14
  9. package/model-function/generate-image/generateImage.cjs +6 -6
  10. package/model-function/generate-image/generateImage.d.ts +12 -9
  11. package/model-function/generate-image/generateImage.js +6 -6
  12. package/model-function/generate-speech/generateSpeech.cjs +7 -7
  13. package/model-function/generate-speech/generateSpeech.d.ts +12 -9
  14. package/model-function/generate-speech/generateSpeech.js +7 -7
  15. package/model-function/generate-speech/streamSpeech.cjs +6 -6
  16. package/model-function/generate-speech/streamSpeech.d.ts +12 -8
  17. package/model-function/generate-speech/streamSpeech.js +6 -6
  18. package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +5 -3
  19. package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +1 -1
  20. package/model-function/generate-structure/StructureFromTextGenerationModel.js +5 -3
  21. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +5 -1
  22. package/model-function/generate-structure/StructureFromTextStreamingModel.js +5 -1
  23. package/model-function/generate-structure/StructureGenerationModel.d.ts +1 -1
  24. package/model-function/generate-structure/generateStructure.cjs +8 -8
  25. package/model-function/generate-structure/generateStructure.d.ts +17 -10
  26. package/model-function/generate-structure/generateStructure.js +8 -8
  27. package/model-function/generate-structure/streamStructure.cjs +6 -6
  28. package/model-function/generate-structure/streamStructure.d.ts +16 -10
  29. package/model-function/generate-structure/streamStructure.js +6 -6
  30. package/model-function/generate-text/generateText.cjs +6 -6
  31. package/model-function/generate-text/generateText.d.ts +12 -9
  32. package/model-function/generate-text/generateText.js +6 -6
  33. package/model-function/generate-text/streamText.cjs +6 -6
  34. package/model-function/generate-text/streamText.d.ts +12 -8
  35. package/model-function/generate-text/streamText.js +6 -6
  36. package/model-function/generate-transcription/generateTranscription.cjs +3 -3
  37. package/model-function/generate-transcription/generateTranscription.d.ts +12 -9
  38. package/model-function/generate-transcription/generateTranscription.js +3 -3
  39. package/model-provider/cohere/CohereTextGenerationModel.d.ts +12 -12
  40. package/model-provider/cohere/CohereTextGenerationModel.test.cjs +7 -4
  41. package/model-provider/cohere/CohereTextGenerationModel.test.js +7 -4
  42. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +10 -10
  43. package/model-provider/llamacpp/LlamaCppCompletionModel.test.cjs +4 -1
  44. package/model-provider/llamacpp/LlamaCppCompletionModel.test.js +4 -1
  45. package/model-provider/mistral/MistralChatModel.test.cjs +15 -8
  46. package/model-provider/mistral/MistralChatModel.test.js +15 -8
  47. package/model-provider/ollama/OllamaChatModel.test.cjs +6 -1
  48. package/model-provider/ollama/OllamaChatModel.test.js +6 -1
  49. package/model-provider/ollama/OllamaCompletionModel.test.cjs +31 -16
  50. package/model-provider/ollama/OllamaCompletionModel.test.js +31 -16
  51. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs +4 -4
  52. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
  53. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js +4 -4
  54. package/model-provider/openai/OpenAIChatModel.test.cjs +21 -14
  55. package/model-provider/openai/OpenAIChatModel.test.js +21 -14
  56. package/model-provider/openai/OpenAICompletionModel.test.cjs +15 -9
  57. package/model-provider/openai/OpenAICompletionModel.test.js +15 -9
  58. package/package.json +1 -1
  59. package/tool/execute-tool/executeTool.cjs +5 -5
  60. package/tool/execute-tool/executeTool.d.ts +8 -4
  61. package/tool/execute-tool/executeTool.js +5 -5
  62. package/tool/execute-tool/safeExecuteToolCall.cjs +1 -1
  63. package/tool/execute-tool/safeExecuteToolCall.js +1 -1
  64. package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +4 -2
  65. package/tool/generate-tool-call/TextGenerationToolCallModel.js +4 -2
  66. package/tool/generate-tool-call/generateToolCall.cjs +7 -7
  67. package/tool/generate-tool-call/generateToolCall.d.ts +11 -5
  68. package/tool/generate-tool-call/generateToolCall.js +7 -7
  69. package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +4 -2
  70. package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +4 -2
  71. package/tool/generate-tool-calls/generateToolCalls.cjs +3 -3
  72. package/tool/generate-tool-calls/generateToolCalls.d.ts +11 -5
  73. package/tool/generate-tool-calls/generateToolCalls.js +3 -3
  74. package/tool/use-tool/useTool.cjs +2 -2
  75. package/tool/use-tool/useTool.d.ts +5 -1
  76. package/tool/use-tool/useTool.js +2 -2
  77. package/tool/use-tools/useTools.cjs +8 -2
  78. package/tool/use-tools/useTools.d.ts +5 -1
  79. package/tool/use-tools/useTools.js +8 -2
  80. package/vector-index/VectorIndexRetriever.cjs +5 -1
  81. package/vector-index/VectorIndexRetriever.js +5 -1
  82. package/vector-index/upsertIntoVectorIndex.cjs +5 -1
  83. package/vector-index/upsertIntoVectorIndex.js +5 -1
package/CHANGELOG.md CHANGED
@@ -1,10 +1,50 @@
  # Changelog

+ ## v0.122.0 - 2024-01-13
+
+ ### Changed
+
+ - **breaking change**: Switch from positional parameters to named parameters (parameter object) for all model and tool functions. The parameter object is the first and only parameter of each function. Additional options (previously the last positional parameter) are now part of the parameter object. Example:
+
+ ```ts
+ // old:
+ const text = await generateText(
+   openai
+     .ChatTextGenerator({
+       model: "gpt-3.5-turbo",
+       maxGenerationTokens: 1000,
+     })
+     .withTextPrompt(),
+
+   "Write a short story about a robot learning to love",
+
+   {
+     functionId: "example-function",
+   }
+ );
+
+ // new:
+ const text = await generateText({
+   model: openai
+     .ChatTextGenerator({
+       model: "gpt-3.5-turbo",
+       maxGenerationTokens: 1000,
+     })
+     .withTextPrompt(),
+
+   prompt: "Write a short story about a robot learning to love",
+
+   functionId: "example-function",
+ });
+ ```
+
+ This change was made to make the API more flexible and to allow for future extensions.
+
  ## v0.121.2 - 2024-01-11

  ### Fixed

- - Ollama response schema for repeated calls with Ollama 0.1.19 completion models. Thanks [@jakedetels](https://github.com/Necmttn) for the bugfix!
+ - Ollama response schema for repeated calls with Ollama 0.1.19 completion models. Thanks [@Necmttn](https://github.com/Necmttn) for the bugfix!

  ## v0.121.1 - 2024-01-10

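The same migration applies to every function in the file list above (`generateText`, `streamText`, `generateImage`, `embed`, `useTool`, and so on). As a minimal sketch of the new calling convention for a streaming call (assuming only the `functionId` option shown in the changelog; other `FunctionOptions` fields move into the parameter object the same way):

```ts
import { streamText, openai } from "modelfusion";

// 0.122.0 style: the model, the prompt, and any function options
// share a single parameter object.
const textStream = await streamText({
  model: openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }).withTextPrompt(),
  prompt: "Write a haiku about version bumps.",
  functionId: "haiku-example", // hypothetical id, used for tracing
});

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```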
package/README.md CHANGED
@@ -53,10 +53,10 @@ You can use [prompt templates](https://modelfusion.dev/guide/function/generate-t
  ```ts
  import { generateText, openai } from "modelfusion";

- const text = await generateText(
-   openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
-   "Write a short story about a robot learning to love:\n\n"
- );
+ const text = await generateText({
+   model: openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
+   prompt: "Write a short story about a robot learning to love:\n\n",
+ });
  ```

  Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
@@ -66,10 +66,10 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
  ```ts
  import { streamText, openai } from "modelfusion";

- const textStream = await streamText(
-   openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
-   "Write a short story about a robot learning to love:\n\n"
- );
+ const textStream = await streamText({
+   model: openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
+   prompt: "Write a short story about a robot learning to love:\n\n",
+ });

  for await (const textPart of textStream) {
    process.stdout.write(textPart);
@@ -88,15 +88,15 @@ import { readFileSync } from "fs";

  const image = readFileSync("./image.png").toString("base64");

- const textStream = await streamText(
-   openai.ChatTextGenerator({ model: "gpt-4-vision-preview" }),
-   [
+ const textStream = await streamText({
+   model: openai.ChatTextGenerator({ model: "gpt-4-vision-preview" }),
+   prompt: [
      openai.ChatMessage.user([
        { type: "text", text: "Describe the image in detail:" },
        { type: "image", base64Image: image, mimeType: "image/png" },
      ]),
-   ]
- );
+   ],
+ });

  for await (const textPart of textStream) {
    process.stdout.write(textPart);
@@ -121,9 +121,8 @@ import {
    jsonStructurePrompt,
  } from "modelfusion";

- const sentiment = await generateStructure(
-   // model:
-   ollama
+ const sentiment = await generateStructure({
+   model: ollama
      .ChatTextGenerator({
        model: "openhermes2.5-mistral",
        maxGenerationTokens: 1024,
@@ -131,8 +130,7 @@ const sentiment = await generateStructure(
      })
      .asStructureGenerationModel(jsonStructurePrompt.instruction()),

-   // schema:
-   zodSchema(
+   schema: zodSchema(
      z.object({
        sentiment: z
          .enum(["positive", "neutral", "negative"])
@@ -140,16 +138,15 @@
      })
    ),

-   // prompt:
-   {
+   prompt: {
      system:
        "You are a sentiment evaluator. " +
        "Analyze the sentiment of the following product review:",
      instruction:
        "After I opened the package, I was met by a very unpleasant smell " +
        "that did not disappear even after washing. Never again!",
-   }
- );
+   },
+ });
  ```

  Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Ollama](https://modelfusion.dev//integration/model-provider/ollama), [Llama.cpp](https://modelfusion.dev//integration/model-provider/llama.cpp)
@@ -161,8 +158,8 @@ Stream a structure that matches a schema. Partial structures before the final pa
  ```ts
  import { zodSchema, openai, streamStructure } from "modelfusion";

- const structureStream = await streamStructure(
-   openai
+ const structureStream = await streamStructure({
+   model: openai
      .ChatTextGenerator(/* ... */)
      .asFunctionCallStructureGenerationModel({
        fnName: "generateCharacter",
@@ -170,7 +167,7 @@ const structureStream = await streamStructure(
      })
      .withTextPrompt(),

-   zodSchema(
+   schema: zodSchema(
      z.object({
        characters: z.array(
          z.object({
@@ -184,8 +181,8 @@ const structureStream = await streamStructure(
        })
      ),

-   "Generate 3 character descriptions for a fantasy role playing game."
- );
+   prompt: "Generate 3 character descriptions for a fantasy role playing game.",
+ });

  for await (const part of structureStream) {
    if (!part.isComplete) {
@@ -207,10 +204,11 @@ Generate an image from a prompt.
  ```ts
  import { generateImage, openai } from "modelfusion";

- const image = await generateImage(
-   openai.ImageGenerator({ model: "dall-e-3", size: "1024x1024" }),
-   "the wicked witch of the west in the style of early 19th century painting"
- );
+ const image = await generateImage({
+   model: openai.ImageGenerator({ model: "dall-e-3", size: "1024x1024" }),
+   prompt:
+     "the wicked witch of the west in the style of early 19th century painting",
+ });
  ```

  Providers: [OpenAI (Dall·E)](https://modelfusion.dev/integration/model-provider/openai), [Stability AI](https://modelfusion.dev/integration/model-provider/stability), [Automatic1111](https://modelfusion.dev/integration/model-provider/automatic1111)
@@ -227,15 +225,16 @@ Synthesize speech (audio) from text. Also called TTS (text-to-speech).
  import { generateSpeech, lmnt } from "modelfusion";

  // `speech` is a Buffer with MP3 audio data
- const speech = await generateSpeech(
-   lmnt.SpeechGenerator({
+ const speech = await generateSpeech({
+   model: lmnt.SpeechGenerator({
      voice: "034b632b-df71-46c8-b440-86a42ffc3cf3", // Henry
    }),
-   "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
+   text:
+     "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
      "as The Rolling Stones unveil 'Hackney Diamonds,' their first collection of " +
      "fresh tunes in nearly twenty years, featuring the illustrious Lady Gaga, the " +
-     "magical Stevie Wonder, and the final beats from the late Charlie Watts."
- );
+     "magical Stevie Wonder, and the final beats from the late Charlie Watts.",
+ });
  ```

  Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elevenlabs), [LMNT](https://modelfusion.dev/integration/model-provider/lmnt), [OpenAI](https://modelfusion.dev/integration/model-provider/openai)
@@ -249,8 +248,8 @@ import { streamSpeech, elevenlabs } from "modelfusion";

  const textStream: AsyncIterable<string>;

- const speechStream = await streamSpeech(
-   elevenlabs.SpeechGenerator({
+ const speechStream = await streamSpeech({
+   model: elevenlabs.SpeechGenerator({
      model: "eleven_turbo_v2",
      voice: "pNInz6obpgDQGcFmaJgB", // Adam
      optimizeStreamingLatency: 1,
@@ -259,8 +258,8 @@ const speechStream = await streamSpeech(
      chunkLengthSchedule: [50, 90, 120, 150, 200],
    },
  }),
-   textStream
- );
+   text: textStream,
+ });

  for await (const part of speechStream) {
    // each part is a Buffer with MP3 audio data
@@ -276,13 +275,13 @@ Transcribe speech (audio) data into text. Also called speech-to-text (STT).
  ```ts
  import { generateTranscription, openai } from "modelfusion";

- const transcription = await generateTranscription(
-   openai.Transcriber({ model: "whisper-1" }),
-   {
+ const transcription = await generateTranscription({
+   model: openai.Transcriber({ model: "whisper-1" }),
+   data: {
      type: "mp3",
      data: await fs.promises.readFile("data/test.mp3"),
-   }
- );
+   },
+ });
  ```

  Providers: [OpenAI (Whisper)](https://modelfusion.dev/integration/model-provider/openai), [Whisper.cpp](https://modelfusion.dev/integration/model-provider/whispercpp)
@@ -293,19 +292,19 @@ Create embeddings for text and other values. Embeddings are vectors that represe

  ```ts
  // embed single value:
- const embedding = await embed(
-   openai.TextEmbedder({ model: "text-embedding-ada-002" }),
-   "At first, Nox didn't know what to do with the pup."
- );
+ const embedding = await embed({
+   model: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
+   value: "At first, Nox didn't know what to do with the pup.",
+ });

  // embed many values:
- const embeddings = await embedMany(
-   openai.TextEmbedder({ model: "text-embedding-ada-002" }),
-   [
+ const embeddings = await embedMany({
+   model: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
+   values: [
      "At first, Nox didn't know what to do with the pup.",
      "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
-   ]
- );
+   ],
+ });
  ```

  Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
@@ -339,11 +338,11 @@ ModelFusion offers several tools out-of-the-box: [Math.js](https://modelfusion.d
  With `useTool`, you can ask a tool-compatible language model (e.g. OpenAI chat) to invoke a single tool. `useTool` first generates a tool call and then executes the tool with the arguments.

  ```ts
- const { tool, toolCall, args, ok, result } = await useTool(
-   openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
-   calculator,
-   [openai.ChatMessage.user("What's fourteen times twelve?")]
- );
+ const { tool, toolCall, args, ok, result } = await useTool({
+   model: openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
+   tool: calculator,
+   prompt: [openai.ChatMessage.user("What's fourteen times twelve?")],
+ });

  console.log(`Tool call:`, toolCall);
  console.log(`Tool:`, tool);
@@ -357,11 +356,11 @@ console.log(`Result or Error:`, result);
  With `useTools`, you can ask a language model to generate several tool calls as well as text. The model will choose which tools (if any) should be called with which arguments. Both the text and the tool calls are optional. This function executes the tools.

  ```ts
- const { text, toolResults } = await useTools(
-   openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
-   [calculator /* ... */],
-   [openai.ChatMessage.user("What's fourteen times twelve?")]
- );
+ const { text, toolResults } = await useTools({
+   model: openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
+   tools: [calculator /* ... */],
+   prompt: [openai.ChatMessage.user("What's fourteen times twelve?")],
+ });
  ```

  #### [Agent Loop](https://modelfusion.dev/guide/tools/agent-loop)
@@ -411,21 +410,22 @@ Prompt templates let you use higher level prompt structures (such as text, instr
  #### Text Prompt Example

  ```ts
- const text = await generateText(
-   openai
+ const text = await generateText({
+   model: openai
      .ChatTextGenerator({
        // ...
      })
      .withTextPrompt(),
-   "Write a short story about a robot learning to love"
- );
+
+   prompt: "Write a short story about a robot learning to love",
+ });
  ```

  #### Instruction Prompt Example

  ```ts
- const text = await generateText(
-   llamacpp
+ const text = await generateText({
+   model: llamacpp
      .CompletionTextGenerator({
        // run https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF with llama.cpp
        promptTemplate: llamacpp.prompt.Llama2, // Set prompt template
@@ -433,11 +433,12 @@ const text = await generateText(
        maxGenerationTokens: 512,
      })
      .withInstructionPrompt(),
-   {
+
+   prompt: {
      system: "You are a story writer.",
      instruction: "Write a short story about a robot learning to love.",
-   }
- );
+   },
+ });
  ```

  They can also be accessed through the shorthand methods `.withTextPrompt()`, `.withChatPrompt()` and `.withInstructionPrompt()` for many models:
@@ -445,13 +446,14 @@ They can also be accessed through the shorthand methods `.withTextPrompt()`, `.w
  #### Chat Prompt Example

  ```ts
- const textStream = await streamText(
-   openai
+ const textStream = await streamText({
+   model: openai
      .ChatTextGenerator({
        model: "gpt-3.5-turbo",
      })
      .withChatPrompt(),
-   {
+
+   prompt: {
      system: "You are a celebrated poet.",
      messages: [
        {
@@ -467,8 +469,8 @@ const textStream = await streamText(
        content: "Write a short story about Robbie learning to love",
      },
    ],
-   }
- );
+   },
+ });
  ```

  | Prompt Template | Text Prompt | Instruction Prompt | Chat Prompt |
@@ -504,19 +506,19 @@ const image = await generateImage(

  ### Metadata and original responses

- ModelFusion model functions return rich responses that include the raw (original) response and metadata when you set the `fullResponse` option to `true`.
+ ModelFusion model functions return rich responses that include the raw (original) response and metadata when you set the `fullResponse` argument to `true`.

  ```ts
  // access the raw response (needs to be typed) and the metadata:
- const { text, rawResponse, metadata } = await generateText(
-   openai.CompletionTextGenerator({
+ const { text, rawResponse, metadata } = await generateText({
+   model: openai.CompletionTextGenerator({
      model: "gpt-3.5-turbo-instruct",
      maxGenerationTokens: 1000,
      n: 2, // generate 2 completions
    }),
-   "Write a short story about a robot learning to love:\n\n",
-   { fullResponse: true }
- );
+   prompt: "Write a short story about a robot learning to love:\n\n",
+   fullResponse: true,
+ });

  console.log(metadata);

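The `useTool` result destructured in the README hunk above distinguishes success from failure via `ok`. A small handling sketch; the field set comes from the README snippet, its `Result or Error` log line motivates the branch, and the `calculator` import is a hypothetical local module:

```ts
import { useTool, openai } from "modelfusion";
import { calculator } from "./calculator-tool"; // hypothetical local tool module

const { tool, toolCall, args, ok, result } = await useTool({
  model: openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
  tool: calculator,
  prompt: [openai.ChatMessage.user("What's fourteen times twelve?")],
});

console.log(`Tool call:`, toolCall);

if (ok) {
  console.log(`Result:`, result); // on success, result is the tool's output
} else {
  console.error(`Error:`, result); // on failure, result carries the error
}
```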
package/classifier/SemanticClassifier.cjs CHANGED
@@ -39,7 +39,10 @@ class SemanticClassifier {
    }
    const embeddings = [];
    for (const cluster of this.clusters) {
-     const clusterEmbeddings = await (0, embed_js_1.embedMany)(this.embeddingModel, cluster.values);
+     const clusterEmbeddings = await (0, embed_js_1.embedMany)({
+       model: this.embeddingModel,
+       values: cluster.values,
+     });
      for (let i = 0; i < clusterEmbeddings.length; i++) {
        embeddings.push({
          embedding: clusterEmbeddings[i],
@@ -52,7 +55,10 @@ class SemanticClassifier {
    return embeddings;
  }
  async classify(value) {
-   const valueEmbedding = await (0, embed_js_1.embed)(this.embeddingModel, value);
+   const valueEmbedding = await (0, embed_js_1.embed)({
+     model: this.embeddingModel,
+     value,
+   });
    const clusterEmbeddings = await this.getEmbeddings();
    const allMatches = [];
    for (const embedding of clusterEmbeddings) {
package/classifier/SemanticClassifier.js CHANGED
@@ -36,7 +36,10 @@ export class SemanticClassifier {
    }
    const embeddings = [];
    for (const cluster of this.clusters) {
-     const clusterEmbeddings = await embedMany(this.embeddingModel, cluster.values);
+     const clusterEmbeddings = await embedMany({
+       model: this.embeddingModel,
+       values: cluster.values,
+     });
      for (let i = 0; i < clusterEmbeddings.length; i++) {
        embeddings.push({
          embedding: clusterEmbeddings[i],
@@ -49,7 +52,10 @@ export class SemanticClassifier {
    return embeddings;
  }
  async classify(value) {
-   const valueEmbedding = await embed(this.embeddingModel, value);
+   const valueEmbedding = await embed({
+     model: this.embeddingModel,
+     value,
+   });
    const clusterEmbeddings = await this.getEmbeddings();
    const allMatches = [];
    for (const embedding of clusterEmbeddings) {
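For context, a usage sketch of the classifier whose internals changed above. The constructor shape is an assumption: the diff only shows that the class reads `this.embeddingModel`, `this.clusters`, and `cluster.values` internally, and the `similarityThreshold` and cluster `name` fields are taken from the library's classify guide and may differ:

```ts
import { SemanticClassifier, openai } from "modelfusion";

// Assumed constructor shape; see the hedge in the lead-in above.
const classifier = new SemanticClassifier({
  embeddingModel: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
  similarityThreshold: 0.82, // assumed cosine-similarity cutoff
  clusters: [
    { name: "weather", values: ["will it rain tomorrow?", "how cold is it?"] },
    { name: "other", values: ["tell me a joke", "what time is it?"] },
  ],
});

// classify() now calls embed({ model, value }) internally, per the diff above.
const cluster = await classifier.classify("is it sunny outside?");
```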
package/model-function/ModelCallEvent.d.ts CHANGED
@@ -22,6 +22,9 @@ export interface BaseModelCallStartedEvent extends BaseFunctionStartedEvent {
  }
  export type BaseModelCallFinishedEventResult = {
      status: "success";
+     /**
+      * The original model response.
+      */
      rawResponse: unknown;
      value: unknown;
      /**
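The newly documented `rawResponse` field is what a function observer receives for a successful model call. A sketch, assuming the `FunctionEvent` type and the `observers` function option that exist elsewhere in the package:

```ts
import { generateText, openai } from "modelfusion";
import type { FunctionEvent } from "modelfusion";

const logRawResponses = {
  onFunctionEvent(event: FunctionEvent) {
    // Only finished model calls carry a result; narrow on status per the type above.
    if ("result" in event && event.result.status === "success") {
      console.log(event.result.rawResponse); // the original model response
    }
  },
};

await generateText({
  model: openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
  prompt: "Hello:\n\n",
  observers: [logRawResponses],
});
```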
package/model-function/embed/embed.cjs CHANGED
@@ -2,8 +2,8 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.embed = exports.embedMany = void 0;
  const executeStandardCall_js_1 = require("../executeStandardCall.cjs");
- async function embedMany(model, values, options) {
-   const fullResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
+ async function embedMany({ model, values, fullResponse, ...options }) {
+   const callResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
      functionType: "embed",
      input: values,
      model,
@@ -43,17 +43,17 @@ async function embedMany(model, values, options) {
        };
      },
    });
-   return options?.fullResponse
+   return fullResponse
      ? {
-         embeddings: fullResponse.value,
-         rawResponse: fullResponse.rawResponse,
-         metadata: fullResponse.metadata,
+         embeddings: callResponse.value,
+         rawResponse: callResponse.rawResponse,
+         metadata: callResponse.metadata,
        }
-     : fullResponse.value;
+     : callResponse.value;
  }
  exports.embedMany = embedMany;
- async function embed(model, value, options) {
-   const fullResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
+ async function embed({ model, value, fullResponse, ...options }) {
+   const callResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
      functionType: "embed",
      input: value,
      model,
@@ -66,12 +66,12 @@ async function embed(model, value, options) {
        };
      },
    });
-   return options?.fullResponse
+   return fullResponse
      ? {
-         embedding: fullResponse.value,
-         rawResponse: fullResponse.rawResponse,
-         metadata: fullResponse.metadata,
+         embedding: callResponse.value,
+         rawResponse: callResponse.rawResponse,
+         metadata: callResponse.metadata,
        }
-     : fullResponse.value;
+     : callResponse.value;
  }
  exports.embed = embed;
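Both implementations rely on rest destructuring so that the remaining `FunctionOptions` (such as `functionId`) keep flowing through to `executeStandardCall`. The pattern in isolation, with a hypothetical options shape:

```ts
// Standalone illustration of `{ model, values, fullResponse, ...options }`:
// everything not picked out explicitly lands in `options`.
function split({ model, values, fullResponse, ...options }: {
  model: string;
  values: string[];
  fullResponse?: boolean;
  functionId?: string; // stands in for any other FunctionOptions field
}) {
  return { model, values, fullResponse, options };
}

console.log(split({ model: "embedder", values: ["a"], functionId: "x" }));
// → { model: "embedder", values: ["a"], fullResponse: undefined,
//     options: { functionId: "x" } }
```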
package/model-function/embed/embed.d.ts CHANGED
@@ -8,26 +8,29 @@ import { EmbeddingModel, EmbeddingModelSettings } from "./EmbeddingModel.js";
   * @see https://modelfusion.dev/guide/function/embed
   *
   * @example
-  * const embeddings = await embedMany(
-  *   openai.TextEmbedder(...),
-  *   [
+  * const embeddings = await embedMany({
+  *   model: openai.TextEmbedder(...),
+  *   values: [
   *     "At first, Nox didn't know what to do with the pup.",
   *     "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
   *   ]
-  * );
+  * });
   *
   * @param {EmbeddingModel<VALUE, EmbeddingModelSettings>} model - The model to use for generating embeddings.
   * @param {VALUE[]} values - The values to generate embeddings for.
-  * @param {FunctionOptions} [options] - Optional settings for the function.
   *
   * @returns {Promise<Vector[]>} - A promise that resolves to an array of vectors representing the embeddings.
   */
- export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, values: VALUE[], options?: FunctionOptions & {
+ export declare function embedMany<VALUE>(args: {
+   model: EmbeddingModel<VALUE, EmbeddingModelSettings>;
+   values: VALUE[];
    fullResponse?: false;
- }): Promise<Vector[]>;
- export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, values: VALUE[], options: FunctionOptions & {
+ } & FunctionOptions): Promise<Vector[]>;
+ export declare function embedMany<VALUE>(args: {
+   model: EmbeddingModel<VALUE, EmbeddingModelSettings>;
+   values: VALUE[];
    fullResponse: true;
- }): Promise<{
+ } & FunctionOptions): Promise<{
    embeddings: Vector[];
    rawResponse: unknown;
    metadata: ModelCallMetadata;
@@ -38,23 +41,26 @@ export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingM
   * @see https://modelfusion.dev/guide/function/embed
   *
   * @example
-  * const embedding = await embed(
-  *   openai.TextEmbedder(...),
-  *   "At first, Nox didn't know what to do with the pup."
-  * );
+  * const embedding = await embed({
+  *   model: openai.TextEmbedder(...),
+  *   value: "At first, Nox didn't know what to do with the pup."
+  * });
   *
   * @param {EmbeddingModel<VALUE, EmbeddingModelSettings>} model - The model to use for generating the embedding.
   * @param {VALUE} value - The value to generate an embedding for.
-  * @param {FunctionOptions} [options] - Optional settings for the function.
   *
   * @returns {Promise<Vector>} - A promise that resolves to a vector representing the embedding.
   */
- export declare function embed<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, value: VALUE, options?: FunctionOptions & {
+ export declare function embed<VALUE>(args: {
+   model: EmbeddingModel<VALUE, EmbeddingModelSettings>;
+   value: VALUE;
    fullResponse?: false;
- }): Promise<Vector>;
- export declare function embed<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, value: VALUE, options: FunctionOptions & {
+ } & FunctionOptions): Promise<Vector>;
+ export declare function embed<VALUE>(args: {
+   model: EmbeddingModel<VALUE, EmbeddingModelSettings>;
+   value: VALUE;
    fullResponse: true;
- }): Promise<{
+ } & FunctionOptions): Promise<{
    embedding: Vector;
    rawResponse: unknown;
    metadata: ModelCallMetadata;
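Because `fullResponse` is now part of the parameter object, the two overloads above select the return type from its literal type. A usage sketch based only on these declarations:

```ts
import { embed, openai } from "modelfusion";

const model = openai.TextEmbedder({ model: "text-embedding-ada-002" });

// First overload (fullResponse omitted or false): resolves to a plain Vector.
const embedding = await embed({ model, value: "hello" });

// Second overload (fullResponse: true): resolves to the full-response object.
const { embedding: vector, rawResponse, metadata } = await embed({
  model,
  value: "hello",
  fullResponse: true,
});
```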
package/model-function/embed/embed.js CHANGED
@@ -1,6 +1,6 @@
  import { executeStandardCall } from "../executeStandardCall.js";
- export async function embedMany(model, values, options) {
-   const fullResponse = await executeStandardCall({
+ export async function embedMany({ model, values, fullResponse, ...options }) {
+   const callResponse = await executeStandardCall({
      functionType: "embed",
      input: values,
      model,
@@ -40,16 +40,16 @@ export async function embedMany(model, values, options) {
        };
      },
    });
-   return options?.fullResponse
+   return fullResponse
      ? {
-         embeddings: fullResponse.value,
-         rawResponse: fullResponse.rawResponse,
-         metadata: fullResponse.metadata,
+         embeddings: callResponse.value,
+         rawResponse: callResponse.rawResponse,
+         metadata: callResponse.metadata,
        }
-     : fullResponse.value;
+     : callResponse.value;
  }
- export async function embed(model, value, options) {
-   const fullResponse = await executeStandardCall({
+ export async function embed({ model, value, fullResponse, ...options }) {
+   const callResponse = await executeStandardCall({
      functionType: "embed",
      input: value,
      model,
@@ -62,11 +62,11 @@ export async function embed(model, value, options) {
        };
      },
    });
-   return options?.fullResponse
+   return fullResponse
      ? {
-         embedding: fullResponse.value,
-         rawResponse: fullResponse.rawResponse,
-         metadata: fullResponse.metadata,
+         embedding: callResponse.value,
+         rawResponse: callResponse.rawResponse,
+         metadata: callResponse.metadata,
        }
-     : fullResponse.value;
+     : callResponse.value;
  }