modelfusion 0.126.0 → 0.128.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. package/CHANGELOG.md +84 -0
  2. package/README.md +6 -11
  3. package/core/api/postToApi.cjs +2 -2
  4. package/core/api/postToApi.d.ts +2 -3
  5. package/core/api/postToApi.js +2 -2
  6. package/core/getFunctionCallLogger.cjs +2 -2
  7. package/core/getFunctionCallLogger.js +2 -2
  8. package/model-function/generate-image/generateImage.cjs +2 -1
  9. package/model-function/generate-image/generateImage.d.ts +4 -5
  10. package/model-function/generate-image/generateImage.js +2 -1
  11. package/model-function/generate-speech/SpeechGenerationEvent.d.ts +1 -2
  12. package/model-function/generate-speech/SpeechGenerationModel.d.ts +3 -4
  13. package/model-function/generate-speech/generateSpeech.d.ts +3 -4
  14. package/model-function/generate-speech/streamSpeech.d.ts +3 -4
  15. package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +2 -2
  16. package/model-function/generate-structure/StructureFromTextGenerationModel.js +2 -2
  17. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +1 -1
  18. package/model-function/generate-structure/StructureFromTextStreamingModel.js +1 -1
  19. package/model-function/generate-structure/streamStructure.cjs +14 -12
  20. package/model-function/generate-structure/streamStructure.d.ts +11 -29
  21. package/model-function/generate-structure/streamStructure.js +14 -12
  22. package/model-function/generate-text/prompt-template/ContentPart.cjs +9 -1
  23. package/model-function/generate-text/prompt-template/ContentPart.d.ts +3 -2
  24. package/model-function/generate-text/prompt-template/ContentPart.js +7 -0
  25. package/model-function/generate-text/streamText.cjs +1 -1
  26. package/model-function/generate-text/streamText.d.ts +1 -1
  27. package/model-function/generate-text/streamText.js +1 -1
  28. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +2 -1
  29. package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +2 -3
  30. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +2 -1
  31. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +2 -2
  32. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +3 -3
  33. package/model-provider/lmnt/LmntSpeechModel.cjs +2 -1
  34. package/model-provider/lmnt/LmntSpeechModel.d.ts +1 -2
  35. package/model-provider/lmnt/LmntSpeechModel.js +2 -1
  36. package/model-provider/mistral/MistralChatModel.test.cjs +2 -2
  37. package/model-provider/mistral/MistralChatModel.test.js +2 -2
  38. package/model-provider/ollama/OllamaChatModel.cjs +1 -1
  39. package/model-provider/ollama/OllamaChatModel.d.ts +5 -5
  40. package/model-provider/ollama/OllamaChatModel.js +1 -1
  41. package/model-provider/ollama/OllamaChatPromptTemplate.cjs +1 -1
  42. package/model-provider/ollama/OllamaChatPromptTemplate.js +2 -2
  43. package/model-provider/ollama/OllamaCompletionModel.cjs +2 -2
  44. package/model-provider/ollama/OllamaCompletionModel.d.ts +5 -5
  45. package/model-provider/ollama/OllamaCompletionModel.js +2 -2
  46. package/model-provider/ollama/OllamaCompletionModel.test.cjs +4 -6
  47. package/model-provider/ollama/OllamaCompletionModel.test.js +4 -6
  48. package/model-provider/openai/OpenAIChatMessage.cjs +5 -4
  49. package/model-provider/openai/OpenAIChatMessage.js +5 -4
  50. package/model-provider/openai/OpenAIChatModel.test.cjs +4 -6
  51. package/model-provider/openai/OpenAIChatModel.test.js +4 -6
  52. package/model-provider/openai/OpenAISpeechModel.d.ts +1 -2
  53. package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -2
  54. package/model-provider/openai-compatible/FireworksAIApiConfiguration.cjs +6 -0
  55. package/model-provider/openai-compatible/FireworksAIApiConfiguration.d.ts +3 -1
  56. package/model-provider/openai-compatible/FireworksAIApiConfiguration.js +6 -0
  57. package/model-provider/openai-compatible/OpenAICompatibleApiConfiguration.d.ts +5 -0
  58. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +1 -1
  59. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +2 -3
  60. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +1 -1
  61. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs +1 -1
  62. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts +2 -3
  63. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js +1 -1
  64. package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.cjs +1 -1
  65. package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.d.ts +2 -3
  66. package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.js +1 -1
  67. package/model-provider/openai-compatible/PerplexityApiConfiguration.cjs +6 -0
  68. package/model-provider/openai-compatible/PerplexityApiConfiguration.d.ts +3 -1
  69. package/model-provider/openai-compatible/PerplexityApiConfiguration.js +6 -0
  70. package/model-provider/openai-compatible/TogetherAIApiConfiguration.cjs +6 -0
  71. package/model-provider/openai-compatible/TogetherAIApiConfiguration.d.ts +3 -1
  72. package/model-provider/openai-compatible/TogetherAIApiConfiguration.js +6 -0
  73. package/model-provider/openai-compatible/index.cjs +1 -1
  74. package/model-provider/openai-compatible/index.d.ts +1 -1
  75. package/model-provider/openai-compatible/index.js +1 -1
  76. package/model-provider/stability/StabilityImageGenerationModel.d.ts +5 -5
  77. package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +1 -2
  78. package/package.json +3 -2
  79. package/util/UInt8Utils.cjs +50 -0
  80. package/util/UInt8Utils.d.ts +3 -0
  81. package/util/UInt8Utils.js +44 -0
  82. package/model-provider/openai-compatible/OpenAICompatibleProviderName.d.ts +0 -1
  83. /package/model-provider/openai-compatible/{OpenAICompatibleProviderName.cjs → OpenAICompatibleApiConfiguration.cjs} +0 -0
  84. /package/model-provider/openai-compatible/{OpenAICompatibleProviderName.js → OpenAICompatibleApiConfiguration.js} +0 -0
package/model-function/generate-text/streamText.cjs
@@ -46,7 +46,7 @@ async function streamText({ model, prompt, fullResponse, ...options }) {
      return fullResponse
          ? {
                textStream: callResponse.value,
-               text: textPromise,
+               textPromise,
                metadata: callResponse.metadata,
            }
          : callResponse.value;
package/model-function/generate-text/streamText.d.ts
@@ -37,6 +37,6 @@ export declare function streamText<PROMPT>(args: {
      fullResponse: true;
  } & FunctionOptions): Promise<{
      textStream: AsyncIterable<string>;
-     text: PromiseLike<string>;
+     textPromise: PromiseLike<string>;
      metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
  }>;
package/model-function/generate-text/streamText.js
@@ -43,7 +43,7 @@ export async function streamText({ model, prompt, fullResponse, ...options }) {
      return fullResponse
          ? {
                textStream: callResponse.value,
-               text: textPromise,
+               textPromise,
                metadata: callResponse.metadata,
            }
          : callResponse.value;
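
The rename in these three files is the breaking change to streamText in this release: with fullResponse: true, the promise for the fully assembled text is now returned as textPromise instead of text. A minimal consumption sketch; the openai.ChatTextGenerator / withTextPrompt wiring follows the library's README pattern of this era and is an assumption here:

import { openai, streamText } from "modelfusion";

const { textStream, textPromise } = await streamText({
  model: openai
    .ChatTextGenerator({ model: "gpt-3.5-turbo" })
    .withTextPrompt(), // accept a plain string prompt
  prompt: "Write a haiku about package diffs.",
  fullResponse: true,
});

// Consume the chunks as they arrive...
for await (const textPart of textStream) {
  process.stdout.write(textPart);
}

// ...and read the fully assembled text once the stream has finished.
console.log(await textPromise);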
package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs
@@ -9,6 +9,7 @@ const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
  const SimpleWebSocket_js_1 = require("../../util/SimpleWebSocket.cjs");
+ const UInt8Utils_js_1 = require("../../util/UInt8Utils.cjs");
  const ElevenLabsApiConfiguration_js_1 = require("./ElevenLabsApiConfiguration.cjs");
  const elevenLabsModels = [
      "eleven_multilingual_v2",
@@ -141,7 +142,7 @@ class ElevenLabsSpeechModel extends AbstractModel_js_1.AbstractModel {
      if (!response.isFinal) {
          queue.push({
              type: "delta",
-             deltaValue: Buffer.from(response.audio, "base64"),
+             deltaValue: (0, UInt8Utils_js_1.base64ToUint8Array)(response.audio),
          });
      }
  };
package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts
@@ -1,4 +1,3 @@
- /// <reference types="node" />
  import { FunctionCallOptions } from "../../core/FunctionOptions.js";
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
@@ -37,8 +36,8 @@ export declare class ElevenLabsSpeechModel extends AbstractModel<ElevenLabsSpeec
      get modelName(): string;
      private callAPI;
      get settingsForEvent(): Partial<ElevenLabsSpeechModelSettings>;
-     doGenerateSpeechStandard(text: string, options: FunctionCallOptions): Promise<Buffer>;
-     doGenerateSpeechStreamDuplex(textStream: AsyncIterable<string>): Promise<AsyncIterable<Delta<Buffer>>>;
+     doGenerateSpeechStandard(text: string, options: FunctionCallOptions): Promise<Uint8Array>;
+     doGenerateSpeechStreamDuplex(textStream: AsyncIterable<string>): Promise<AsyncIterable<Delta<Uint8Array>>>;
      withSettings(additionalSettings: Partial<ElevenLabsSpeechModelSettings>): this;
  }
  export {};
package/model-provider/elevenlabs/ElevenLabsSpeechModel.js
@@ -6,6 +6,7 @@ import { safeParseJSON } from "../../core/schema/parseJSON.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
  import { createSimpleWebSocket } from "../../util/SimpleWebSocket.js";
+ import { base64ToUint8Array } from "../../util/UInt8Utils.js";
  import { ElevenLabsApiConfiguration } from "./ElevenLabsApiConfiguration.js";
  const elevenLabsModels = [
      "eleven_multilingual_v2",
@@ -138,7 +139,7 @@ export class ElevenLabsSpeechModel extends AbstractModel {
      if (!response.isFinal) {
          queue.push({
              type: "delta",
-             deltaValue: Buffer.from(response.audio, "base64"),
+             deltaValue: base64ToUint8Array(response.audio),
          });
      }
  };
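
util/UInt8Utils is a new module in this release (files 79–81 in the list above); its full source is not shown in this diff view. A plausible, environment-independent shape for the base64ToUint8Array helper used here — an illustrative sketch, not the package's actual implementation:

// Decode a base64 string into raw bytes without relying on Node's Buffer,
// so the same code runs in browsers and edge runtimes.
export function base64ToUint8Array(base64: string): Uint8Array {
  const binary = globalThis.atob(base64); // atob is available in browsers and Node >= 16
  const bytes = new Uint8Array(binary.length);
  for (let i = 0; i < binary.length; i++) {
    bytes[i] = binary.charCodeAt(i);
  }
  return bytes;
}

Dropping Buffer in favor of Uint8Array is also what lets the /// <reference types="node" /> directives disappear from the .d.ts files in this diff.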
package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs
@@ -46,7 +46,7 @@ function instruction() {
      }
      case "image": {
          text += `[img-${imageCounter}]`;
-         images[imageCounter.toString()] = content.base64Image;
+         images[imageCounter.toString()] = (0, ContentPart_js_1.getImageAsBase64)(content.image);
          imageCounter++;
          break;
      }
@@ -85,7 +85,7 @@ function chat() {
      }
      case "image": {
          text += `[img-${imageCounter}]`;
-         images[imageCounter.toString()] = part.base64Image;
+         images[imageCounter.toString()] = (0, ContentPart_js_1.getImageAsBase64)(part.image);
          imageCounter++;
          break;
      }
package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js
@@ -1,4 +1,4 @@
- import { validateContentIsString } from "../../model-function/generate-text/prompt-template/ContentPart.js";
+ import { getImageAsBase64, validateContentIsString, } from "../../model-function/generate-text/prompt-template/ContentPart.js";
  import { InvalidPromptError } from "../../model-function/generate-text/prompt-template/InvalidPromptError.js";
  import { text as vicunaText } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
  // default Vicuna 1 system message
@@ -42,7 +42,7 @@ export function instruction() {
      }
      case "image": {
          text += `[img-${imageCounter}]`;
-         images[imageCounter.toString()] = content.base64Image;
+         images[imageCounter.toString()] = getImageAsBase64(content.image);
          imageCounter++;
          break;
      }
@@ -80,7 +80,7 @@ export function chat() {
      }
      case "image": {
          text += `[img-${imageCounter}]`;
-         images[imageCounter.toString()] = part.base64Image;
+         images[imageCounter.toString()] = getImageAsBase64(part.image);
          imageCounter++;
          break;
      }
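
These templates now read content.image / part.image through getImageAsBase64 instead of a precomputed base64Image string, so multi-modal prompts can carry raw image bytes. A hedged sketch of an instruction prompt under the new part shape (the field names follow the diffs above; the exact type of the image field — base64 string or Uint8Array — is an assumption):

import { readFileSync } from "node:fs";

const instructionPrompt = {
  instruction: [
    { type: "text" as const, text: "Describe this picture in one sentence." },
    {
      type: "image" as const,
      image: new Uint8Array(readFileSync("picture.jpg")), // raw bytes instead of a base64 string
      mimeType: "image/jpeg",
    },
  ],
};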
package/model-provider/lmnt/LmntSpeechModel.cjs
@@ -6,6 +6,7 @@ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndTh
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
  const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+ const UInt8Utils_js_1 = require("../../util/UInt8Utils.cjs");
  const LmntApiConfiguration_js_1 = require("./LmntApiConfiguration.cjs");
  /**
   * Synthesize speech using the LMNT API.
@@ -81,7 +82,7 @@ class LmntSpeechModel extends AbstractModel_js_1.AbstractModel {
      }
      async doGenerateSpeechStandard(text, options) {
          const response = await this.callAPI(text, options);
-         return Buffer.from(response.audio, "base64");
+         return (0, UInt8Utils_js_1.base64ToUint8Array)(response.audio);
      }
      withSettings(additionalSettings) {
          return new LmntSpeechModel({
package/model-provider/lmnt/LmntSpeechModel.d.ts
@@ -1,4 +1,3 @@
- /// <reference types="node" />
  import { z } from "zod";
  import { FunctionCallOptions } from "../../core/FunctionOptions.js";
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
@@ -34,7 +33,7 @@ export declare class LmntSpeechModel extends AbstractModel<LmntSpeechModelSettin
      get modelName(): string;
      private callAPI;
      get settingsForEvent(): Partial<LmntSpeechModelSettings>;
-     doGenerateSpeechStandard(text: string, options: FunctionCallOptions): Promise<Buffer>;
+     doGenerateSpeechStandard(text: string, options: FunctionCallOptions): Promise<Uint8Array>;
      withSettings(additionalSettings: Partial<LmntSpeechModelSettings>): this;
  }
  declare const lmntSpeechResponseSchema: z.ZodObject<{
package/model-provider/lmnt/LmntSpeechModel.js
@@ -3,6 +3,7 @@ import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottl
  import { createJsonResponseHandler, createTextErrorResponseHandler, postToApi, } from "../../core/api/postToApi.js";
  import { zodSchema } from "../../core/schema/ZodSchema.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
+ import { base64ToUint8Array } from "../../util/UInt8Utils.js";
  import { LmntApiConfiguration } from "./LmntApiConfiguration.js";
  /**
   * Synthesize speech using the LMNT API.
@@ -78,7 +79,7 @@ export class LmntSpeechModel extends AbstractModel {
      }
      async doGenerateSpeechStandard(text, options) {
          const response = await this.callAPI(text, options);
-         return Buffer.from(response.audio, "base64");
+         return base64ToUint8Array(response.audio);
      }
      withSettings(additionalSettings) {
          return new LmntSpeechModel({
@@ -44,7 +44,7 @@ describe("streamText", () => {
44
44
  ]);
45
45
  });
46
46
  it("should return text", async () => {
47
- const { text } = await (0, streamText_js_1.streamText)({
47
+ const { textPromise } = await (0, streamText_js_1.streamText)({
48
48
  model: new MistralChatModel_js_1.MistralChatModel({
49
49
  api: new MistralApiConfiguration_js_1.MistralApiConfiguration({ apiKey: "test-key" }),
50
50
  model: "mistral-tiny",
@@ -52,7 +52,7 @@ describe("streamText", () => {
52
52
  prompt: "hello",
53
53
  fullResponse: true,
54
54
  });
55
- expect(await text).toStrictEqual("Hello, world!");
55
+ expect(await textPromise).toStrictEqual("Hello, world!");
56
56
  });
57
57
  });
58
58
  });
@@ -42,7 +42,7 @@ describe("streamText", () => {
42
42
  ]);
43
43
  });
44
44
  it("should return text", async () => {
45
- const { text } = await streamText({
45
+ const { textPromise } = await streamText({
46
46
  model: new MistralChatModel({
47
47
  api: new MistralApiConfiguration({ apiKey: "test-key" }),
48
48
  model: "mistral-tiny",
@@ -50,7 +50,7 @@ describe("streamText", () => {
50
50
  prompt: "hello",
51
51
  fullResponse: true,
52
52
  });
53
- expect(await text).toStrictEqual("Hello, world!");
53
+ expect(await textPromise).toStrictEqual("Hello, world!");
54
54
  });
55
55
  });
56
56
  });
package/model-provider/ollama/OllamaChatModel.cjs
@@ -231,7 +231,7 @@ const ollamaChatStreamChunkSchema = zod_1.z.discriminatedUnion("done", [
      created_at: zod_1.z.string(),
      total_duration: zod_1.z.number(),
      load_duration: zod_1.z.number().optional(),
-     prompt_eval_count: zod_1.z.number(),
+     prompt_eval_count: zod_1.z.number().optional(),
      prompt_eval_duration: zod_1.z.number().optional(),
      eval_count: zod_1.z.number(),
      eval_duration: zod_1.z.number(),
package/model-provider/ollama/OllamaChatModel.d.ts
@@ -94,10 +94,10 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
      done: true;
      created_at: string;
      total_duration: number;
-     prompt_eval_count: number;
      eval_count: number;
      eval_duration: number;
      load_duration?: number | undefined;
+     prompt_eval_count?: number | undefined;
      prompt_eval_duration?: number | undefined;
  }>>>;
  extractTextDelta(delta: unknown): string | undefined;
@@ -197,7 +197,7 @@ declare const ollamaChatStreamChunkSchema: z.ZodDiscriminatedUnion<"done", [z.Zo
      created_at: z.ZodString;
      total_duration: z.ZodNumber;
      load_duration: z.ZodOptional<z.ZodNumber>;
-     prompt_eval_count: z.ZodNumber;
+     prompt_eval_count: z.ZodOptional<z.ZodNumber>;
      prompt_eval_duration: z.ZodOptional<z.ZodNumber>;
      eval_count: z.ZodNumber;
      eval_duration: z.ZodNumber;
@@ -206,20 +206,20 @@ declare const ollamaChatStreamChunkSchema: z.ZodDiscriminatedUnion<"done", [z.Zo
      done: true;
      created_at: string;
      total_duration: number;
-     prompt_eval_count: number;
      eval_count: number;
      eval_duration: number;
      load_duration?: number | undefined;
+     prompt_eval_count?: number | undefined;
      prompt_eval_duration?: number | undefined;
  }, {
      model: string;
      done: true;
      created_at: string;
      total_duration: number;
-     prompt_eval_count: number;
      eval_count: number;
      eval_duration: number;
      load_duration?: number | undefined;
+     prompt_eval_count?: number | undefined;
      prompt_eval_duration?: number | undefined;
  }>]>;
  export type OllamaChatStreamChunk = z.infer<typeof ollamaChatStreamChunkSchema>;
@@ -274,10 +274,10 @@ export declare const OllamaChatResponseFormat: {
      done: true;
      created_at: string;
      total_duration: number;
-     prompt_eval_count: number;
      eval_count: number;
      eval_duration: number;
      load_duration?: number | undefined;
+     prompt_eval_count?: number | undefined;
      prompt_eval_duration?: number | undefined;
  }>>>;
  };
package/model-provider/ollama/OllamaChatModel.js
@@ -227,7 +227,7 @@ const ollamaChatStreamChunkSchema = z.discriminatedUnion("done", [
      created_at: z.string(),
      total_duration: z.number(),
      load_duration: z.number().optional(),
-     prompt_eval_count: z.number(),
+     prompt_eval_count: z.number().optional(),
      prompt_eval_duration: z.number().optional(),
      eval_count: z.number(),
      eval_duration: z.number(),
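
Making prompt_eval_count optional matches observed Ollama behavior: the final streaming chunk can omit the field, e.g. when prompt evaluation is skipped because the prompt was served from Ollama's cache. Downstream token accounting should therefore treat it as possibly missing — a small illustrative sketch (variable names are hypothetical):

// `chunk` is a parsed OllamaChatStreamChunk (schema above).
if (chunk.done === true) {
  const promptTokens = chunk.prompt_eval_count ?? 0; // may be omitted by Ollama
  const completionTokens = chunk.eval_count;
  console.log(`tokens used: ${promptTokens} prompt + ${completionTokens} completion`);
}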
package/model-provider/ollama/OllamaChatPromptTemplate.cjs
@@ -95,7 +95,7 @@ function extractUserContent(input) {
          content += part.text;
      }
      else {
-         images.push(part.base64Image);
+         images.push((0, ContentPart_js_1.getImageAsBase64)(part.image));
      }
  }
  return { content, images };
package/model-provider/ollama/OllamaChatPromptTemplate.js
@@ -1,4 +1,4 @@
- import { validateContentIsString } from "../../model-function/generate-text/prompt-template/ContentPart.js";
+ import { getImageAsBase64, validateContentIsString, } from "../../model-function/generate-text/prompt-template/ContentPart.js";
  import { InvalidPromptError } from "../../model-function/generate-text/prompt-template/InvalidPromptError.js";
  /**
   * OllamaChatPrompt identity chat format.
@@ -88,7 +88,7 @@ function extractUserContent(input) {
          content += part.text;
      }
      else {
-         images.push(part.base64Image);
+         images.push(getImageAsBase64(part.image));
      }
  }
  return { content, images };
package/model-provider/ollama/OllamaCompletionModel.cjs
@@ -180,7 +180,7 @@ class OllamaCompletionModel extends AbstractModel_js_1.AbstractModel {
      return this.settings.promptTemplate ?? OllamaCompletionPrompt_js_1.Text;
  }
  withJsonOutput() {
-     return this;
+     return this.withSettings({ format: "json" });
  }
  withTextPrompt() {
      return this.withPromptTemplate(this.promptTemplateProvider.text());
@@ -235,7 +235,7 @@ const ollamaCompletionStreamChunkSchema = zod_1.z.discriminatedUnion("done", [
      load_duration: zod_1.z.number().optional(),
      sample_count: zod_1.z.number().optional(),
      sample_duration: zod_1.z.number().optional(),
-     prompt_eval_count: zod_1.z.number(),
+     prompt_eval_count: zod_1.z.number().optional(),
      prompt_eval_duration: zod_1.z.number().optional(),
      eval_count: zod_1.z.number(),
      eval_duration: zod_1.z.number(),
package/model-provider/ollama/OllamaCompletionModel.d.ts
@@ -127,12 +127,12 @@ export declare class OllamaCompletionModel<CONTEXT_WINDOW_SIZE extends number |
      done: true;
      created_at: string;
      total_duration: number;
-     prompt_eval_count: number;
      eval_count: number;
      eval_duration: number;
      load_duration?: number | undefined;
      sample_count?: number | undefined;
      sample_duration?: number | undefined;
+     prompt_eval_count?: number | undefined;
      prompt_eval_duration?: number | undefined;
      context?: number[] | undefined;
  }>>>;
@@ -209,7 +209,7 @@ declare const ollamaCompletionStreamChunkSchema: z.ZodDiscriminatedUnion<"done",
      load_duration: z.ZodOptional<z.ZodNumber>;
      sample_count: z.ZodOptional<z.ZodNumber>;
      sample_duration: z.ZodOptional<z.ZodNumber>;
-     prompt_eval_count: z.ZodNumber;
+     prompt_eval_count: z.ZodOptional<z.ZodNumber>;
      prompt_eval_duration: z.ZodOptional<z.ZodNumber>;
      eval_count: z.ZodNumber;
      eval_duration: z.ZodNumber;
@@ -219,12 +219,12 @@ declare const ollamaCompletionStreamChunkSchema: z.ZodDiscriminatedUnion<"done",
      done: true;
      created_at: string;
      total_duration: number;
-     prompt_eval_count: number;
      eval_count: number;
      eval_duration: number;
      load_duration?: number | undefined;
      sample_count?: number | undefined;
      sample_duration?: number | undefined;
+     prompt_eval_count?: number | undefined;
      prompt_eval_duration?: number | undefined;
      context?: number[] | undefined;
  }, {
@@ -232,12 +232,12 @@ declare const ollamaCompletionStreamChunkSchema: z.ZodDiscriminatedUnion<"done",
      done: true;
      created_at: string;
      total_duration: number;
-     prompt_eval_count: number;
      eval_count: number;
      eval_duration: number;
      load_duration?: number | undefined;
      sample_count?: number | undefined;
      sample_duration?: number | undefined;
+     prompt_eval_count?: number | undefined;
      prompt_eval_duration?: number | undefined;
      context?: number[] | undefined;
  }>]>;
@@ -288,12 +288,12 @@ export declare const OllamaCompletionResponseFormat: {
      done: true;
      created_at: string;
      total_duration: number;
-     prompt_eval_count: number;
      eval_count: number;
      eval_duration: number;
      load_duration?: number | undefined;
      sample_count?: number | undefined;
      sample_duration?: number | undefined;
+     prompt_eval_count?: number | undefined;
      prompt_eval_duration?: number | undefined;
      context?: number[] | undefined;
  }>>>;
package/model-provider/ollama/OllamaCompletionModel.js
@@ -177,7 +177,7 @@ export class OllamaCompletionModel extends AbstractModel {
      return this.settings.promptTemplate ?? Text;
  }
  withJsonOutput() {
-     return this;
+     return this.withSettings({ format: "json" });
  }
  withTextPrompt() {
      return this.withPromptTemplate(this.promptTemplateProvider.text());
@@ -231,7 +231,7 @@ const ollamaCompletionStreamChunkSchema = z.discriminatedUnion("done", [
      load_duration: z.number().optional(),
      sample_count: z.number().optional(),
      sample_duration: z.number().optional(),
-     prompt_eval_count: z.number(),
+     prompt_eval_count: z.number().optional(),
      prompt_eval_duration: z.number().optional(),
      eval_count: z.number(),
      eval_duration: z.number(),
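
withJsonOutput() on OllamaCompletionModel was previously a no-op; it now switches the request to Ollama's JSON mode by setting format: "json" in the settings. A short sketch of the effect (the ollama.CompletionTextGenerator factory name follows the library's facade and is assumed here):

import { ollama } from "modelfusion";

const jsonModel = ollama
  .CompletionTextGenerator({ model: "mistral", temperature: 0 })
  .withJsonOutput(); // now equivalent to passing format: "json" in the settings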
@@ -124,13 +124,11 @@ describe("streamStructure", () => {
124
124
  schema: (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })),
125
125
  prompt: "generate a name",
126
126
  });
127
- // note: space moved to last chunk bc of trimming
128
127
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
129
- { isComplete: false, value: {} },
130
- { isComplete: false, value: { name: "" } },
131
- { isComplete: false, value: { name: "M" } },
132
- { isComplete: false, value: { name: "Mike" } },
133
- { isComplete: true, value: { name: "Mike" } },
128
+ {},
129
+ { name: "" },
130
+ { name: "M" },
131
+ { name: "Mike" },
134
132
  ]);
135
133
  });
136
134
  });
@@ -122,13 +122,11 @@ describe("streamStructure", () => {
122
122
  schema: zodSchema(z.object({ name: z.string() })),
123
123
  prompt: "generate a name",
124
124
  });
125
- // note: space moved to last chunk bc of trimming
126
125
  expect(await arrayFromAsync(stream)).toStrictEqual([
127
- { isComplete: false, value: {} },
128
- { isComplete: false, value: { name: "" } },
129
- { isComplete: false, value: { name: "M" } },
130
- { isComplete: false, value: { name: "Mike" } },
131
- { isComplete: true, value: { name: "Mike" } },
126
+ {},
127
+ { name: "" },
128
+ { name: "M" },
129
+ { name: "Mike" },
132
130
  ]);
133
131
  });
134
132
  });
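
These test updates reflect the streamStructure change in this release: the stream now yields the partial structures themselves instead of { isComplete, value } wrappers (the full-response object still exposes the final structure; see the streamStructure.d.ts entry in the file list). A consumption sketch — the model wiring via jsonStructurePrompt follows the library's README pattern of this era and is an assumption:

import { jsonStructurePrompt, ollama, streamStructure, zodSchema } from "modelfusion";
import { z } from "zod";

const stream = await streamStructure({
  model: ollama
    .ChatTextGenerator({ model: "openhermes2.5-mistral", temperature: 0 })
    .asStructureGenerationModel(jsonStructurePrompt.text()),
  schema: zodSchema(z.object({ name: z.string() })),
  prompt: "generate a name",
});

for await (const part of stream) {
  console.log(part); // e.g. {}, { name: "" }, { name: "M" }, { name: "Mike" }
}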
package/model-provider/openai/OpenAIChatMessage.cjs
@@ -1,6 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.OpenAIChatMessage = void 0;
+ const ContentPart_js_1 = require("../../model-function/generate-text/prompt-template/ContentPart.cjs");
  exports.OpenAIChatMessage = {
      /**
       * Creates a system chat message.
@@ -16,15 +17,15 @@ exports.OpenAIChatMessage = {
      role: "user",
      content: typeof content === "string"
          ? content
-         : content.map((element) => {
-             switch (element.type) {
+         : content.map((part) => {
+             switch (part.type) {
              case "text": {
-                 return { type: "text", text: element.text };
+                 return { type: "text", text: part.text };
              }
              case "image": {
                  return {
                      type: "image_url",
-                     image_url: `data:${element.mimeType ?? "image/jpeg"};base64,${element.base64Image}`,
+                     image_url: `data:${part.mimeType ?? "image/jpeg"};base64,${(0, ContentPart_js_1.getImageAsBase64)(part.image)}`,
                  };
              }
          }
package/model-provider/openai/OpenAIChatMessage.js
@@ -1,3 +1,4 @@
+ import { getImageAsBase64, } from "../../model-function/generate-text/prompt-template/ContentPart.js";
  export const OpenAIChatMessage = {
      /**
       * Creates a system chat message.
@@ -13,15 +14,15 @@ export const OpenAIChatMessage = {
      role: "user",
      content: typeof content === "string"
          ? content
-         : content.map((element) => {
-             switch (element.type) {
+         : content.map((part) => {
+             switch (part.type) {
              case "text": {
-                 return { type: "text", text: element.text };
+                 return { type: "text", text: part.text };
              }
              case "image": {
                  return {
                      type: "image_url",
-                     image_url: `data:${element.mimeType ?? "image/jpeg"};base64,${element.base64Image}`,
+                     image_url: `data:${part.mimeType ?? "image/jpeg"};base64,${getImageAsBase64(part.image)}`,
                  };
              }
          }
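
OpenAIChatMessage.user accepts multi-modal content, and image parts are now encoded through getImageAsBase64 when the data: URL is built. A hedged usage sketch under the new part shape (assuming OpenAIChatMessage is re-exported from the package root, and that the image field accepts raw bytes):

import { readFileSync } from "node:fs";
import { OpenAIChatMessage } from "modelfusion";

const userMessage = OpenAIChatMessage.user([
  { type: "text", text: "What is in this image?" },
  {
    type: "image",
    image: new Uint8Array(readFileSync("photo.png")), // raw bytes; base64-encoded internally
    mimeType: "image/png",
  },
]);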
@@ -89,13 +89,11 @@ describe("streamStructure", () => {
89
89
  schema: (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })),
90
90
  prompt: "generate a name",
91
91
  });
92
- // note: space moved to last chunk bc of trimming
93
92
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
94
- { isComplete: false, value: {} },
95
- { isComplete: false, value: { name: "" } },
96
- { isComplete: false, value: { name: "M" } },
97
- { isComplete: false, value: { name: "Mike" } },
98
- { isComplete: true, value: { name: "Mike" } },
93
+ {},
94
+ { name: "" },
95
+ { name: "M" },
96
+ { name: "Mike" },
99
97
  ]);
100
98
  });
101
99
  });
@@ -87,13 +87,11 @@ describe("streamStructure", () => {
87
87
  schema: zodSchema(z.object({ name: z.string() })),
88
88
  prompt: "generate a name",
89
89
  });
90
- // note: space moved to last chunk bc of trimming
91
90
  expect(await arrayFromAsync(stream)).toStrictEqual([
92
- { isComplete: false, value: {} },
93
- { isComplete: false, value: { name: "" } },
94
- { isComplete: false, value: { name: "M" } },
95
- { isComplete: false, value: { name: "Mike" } },
96
- { isComplete: true, value: { name: "Mike" } },
91
+ {},
92
+ { name: "" },
93
+ { name: "M" },
94
+ { name: "Mike" },
97
95
  ]);
98
96
  });
99
97
  });
package/model-provider/openai/OpenAISpeechModel.d.ts
@@ -1,4 +1,3 @@
- /// <reference types="node" />
  import { FunctionCallOptions } from "../../core/FunctionOptions.js";
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
@@ -46,7 +45,7 @@ export declare class OpenAISpeechModel extends AbstractModel<OpenAISpeechModelSe
      get modelName(): "tts-1" | "tts-1-hd";
      private callAPI;
      get settingsForEvent(): Partial<OpenAISpeechModelSettings>;
-     doGenerateSpeechStandard(text: string, options: FunctionCallOptions): Promise<Buffer>;
+     doGenerateSpeechStandard(text: string, options: FunctionCallOptions): Promise<Uint8Array>;
      withSettings(additionalSettings: Partial<OpenAISpeechModelSettings>): this;
  }
  export {};
package/model-provider/openai/OpenAITranscriptionModel.d.ts
@@ -1,4 +1,3 @@
- /// <reference types="node" />
  import { z } from "zod";
  import { FunctionCallOptions } from "../../core/FunctionOptions.js";
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
@@ -43,7 +42,7 @@ export interface OpenAITranscriptionModelSettings extends TranscriptionModelSett
  }
  export type OpenAITranscriptionInput = {
      type: "flac" | "m4a" | "mp3" | "mp4" | "mpeg" | "mpga" | "ogg" | "wav" | "webm";
-     data: Buffer;
+     data: Uint8Array;
  };
  /**
   * Create a transcription model that calls the OpenAI transcription API.
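
OpenAITranscriptionInput.data switches from Node's Buffer to the platform-neutral Uint8Array; since Buffer is a Uint8Array subclass, existing Node callers keep working. A usage sketch following the library's README pattern for generateTranscription (the openai.Transcriber factory name is assumed here):

import { readFileSync } from "node:fs";
import { generateTranscription, openai } from "modelfusion";

const transcription = await generateTranscription({
  model: openai.Transcriber({ model: "whisper-1" }),
  data: {
    type: "mp3",
    data: new Uint8Array(readFileSync("audio.mp3")), // any Uint8Array works, including a Buffer
  },
});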
package/model-provider/openai-compatible/FireworksAIApiConfiguration.cjs
@@ -28,6 +28,12 @@ class FireworksAIApiConfiguration extends BaseUrlApiConfiguration_js_1.BaseUrlAp
              path: "/inference/v1",
          },
      });
+     Object.defineProperty(this, "provider", {
+         enumerable: true,
+         configurable: true,
+         writable: true,
+         value: "openaicompatible-fireworksai"
+     });
  }
  }
  exports.FireworksAIApiConfiguration = FireworksAIApiConfiguration;
package/model-provider/openai-compatible/FireworksAIApiConfiguration.d.ts
@@ -1,4 +1,5 @@
  import { BaseUrlApiConfigurationWithDefaults, PartialBaseUrlPartsApiConfigurationOptions } from "../../core/api/BaseUrlApiConfiguration.js";
+ import { OpenAICompatibleApiConfiguration } from "./OpenAICompatibleApiConfiguration.js";
  /**
   * Configuration for the Fireworks.ai API.
   *
@@ -6,8 +7,9 @@ import { BaseUrlApiConfigurationWithDefaults, PartialBaseUrlPartsApiConfiguratio
   *
   * @see https://readme.fireworks.ai/docs/openai-compatibility
   */
- export declare class FireworksAIApiConfiguration extends BaseUrlApiConfigurationWithDefaults {
+ export declare class FireworksAIApiConfiguration extends BaseUrlApiConfigurationWithDefaults implements OpenAICompatibleApiConfiguration {
      constructor(settings?: PartialBaseUrlPartsApiConfigurationOptions & {
          apiKey?: string;
      });
+     readonly provider = "openaicompatible-fireworksai";
  }
package/model-provider/openai-compatible/FireworksAIApiConfiguration.js
@@ -25,5 +25,11 @@ export class FireworksAIApiConfiguration extends BaseUrlApiConfigurationWithDefa
              path: "/inference/v1",
          },
      });
+     Object.defineProperty(this, "provider", {
+         enumerable: true,
+         configurable: true,
+         writable: true,
+         value: "openaicompatible-fireworksai"
+     });
  }
  }
package/model-provider/openai-compatible/OpenAICompatibleApiConfiguration.d.ts (new file)
@@ -0,0 +1,5 @@
+ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
+ export type OpenAICompatibleProviderName = `openaicompatible` | `openaicompatible-${string}`;
+ export interface OpenAICompatibleApiConfiguration extends ApiConfiguration {
+     provider?: OpenAICompatibleProviderName;
+ }
package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs
@@ -37,7 +37,7 @@ class OpenAICompatibleChatModel extends AbstractOpenAIChatModel_js_1.AbstractOpe
      });
  }
  get provider() {
-     return this.settings.provider ?? "openaicompatible";
+     return (this.settings.provider ?? this.settings.api.provider ?? "openaicompatible");
  }
  get modelName() {
      return this.settings.model;
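
The provider id is now resolved in order: the explicit model setting, then the API configuration's new provider field, then the "openaicompatible" fallback. This lets function-call events and logs distinguish Fireworks, Perplexity, and Together calls without extra model settings. A sketch using the openaicompatible facade (the facade and the environment-variable behavior follow the library's README and are assumptions here):

import { openaicompatible, FireworksAIApiConfiguration } from "modelfusion";

const model = openaicompatible.ChatTextGenerator({
  api: new FireworksAIApiConfiguration(), // typically reads FIREWORKS_API_KEY from the environment
  model: "accounts/fireworks/models/mistral-7b",
});

console.log(model.provider); // "openaicompatible-fireworksai", taken from the api configuration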