modelfusion 0.126.0 → 0.127.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. package/CHANGELOG.md +55 -0
  2. package/README.md +5 -10
  3. package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +2 -2
  4. package/model-function/generate-structure/StructureFromTextGenerationModel.js +2 -2
  5. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +1 -1
  6. package/model-function/generate-structure/StructureFromTextStreamingModel.js +1 -1
  7. package/model-function/generate-structure/streamStructure.cjs +14 -12
  8. package/model-function/generate-structure/streamStructure.d.ts +11 -29
  9. package/model-function/generate-structure/streamStructure.js +14 -12
  10. package/model-function/generate-text/streamText.cjs +1 -1
  11. package/model-function/generate-text/streamText.d.ts +1 -1
  12. package/model-function/generate-text/streamText.js +1 -1
  13. package/model-provider/mistral/MistralChatModel.test.cjs +2 -2
  14. package/model-provider/mistral/MistralChatModel.test.js +2 -2
  15. package/model-provider/ollama/OllamaChatModel.cjs +1 -1
  16. package/model-provider/ollama/OllamaChatModel.d.ts +5 -5
  17. package/model-provider/ollama/OllamaChatModel.js +1 -1
  18. package/model-provider/ollama/OllamaCompletionModel.cjs +2 -2
  19. package/model-provider/ollama/OllamaCompletionModel.d.ts +5 -5
  20. package/model-provider/ollama/OllamaCompletionModel.js +2 -2
  21. package/model-provider/ollama/OllamaCompletionModel.test.cjs +4 -6
  22. package/model-provider/ollama/OllamaCompletionModel.test.js +4 -6
  23. package/model-provider/openai/OpenAIChatModel.test.cjs +4 -6
  24. package/model-provider/openai/OpenAIChatModel.test.js +4 -6
  25. package/package.json +3 -2
package/CHANGELOG.md CHANGED
@@ -1,5 +1,60 @@
1
1
  # Changelog
2
2
 
3
+ ## v0.127.0 - 2024-01-15
4
+
5
+ ### Changed
6
+
7
+ - **breaking change**: `streamStructure` returns an async iterable over deep partial objects. If you need to get the fully validated final result, you can use the `fullResponse: true` option and await the `structurePromise` value. Example:
8
+
9
+ ```ts
10
+ const { structureStream, structurePromise } = await streamStructure({
11
+ model: ollama
12
+ .ChatTextGenerator({
13
+ model: "openhermes2.5-mistral",
14
+ maxGenerationTokens: 1024,
15
+ temperature: 0,
16
+ })
17
+ .asStructureGenerationModel(jsonStructurePrompt.text()),
18
+
19
+ schema: zodSchema(
20
+ z.object({
21
+ characters: z.array(
22
+ z.object({
23
+ name: z.string(),
24
+ class: z
25
+ .string()
26
+ .describe("Character class, e.g. warrior, mage, or thief."),
27
+ description: z.string(),
28
+ })
29
+ ),
30
+ })
31
+ ),
32
+
33
+ prompt:
34
+ "Generate 3 character descriptions for a fantasy role playing game.",
35
+
36
+ fullResponse: true,
37
+ });
38
+
39
+ for await (const partialStructure of structureStream) {
40
+ console.clear();
41
+ console.log(partialStructure);
42
+ }
43
+
44
+ const structure = await structurePromise;
45
+
46
+ console.clear();
47
+ console.log("FINAL STRUCTURE");
48
+ console.log(structure);
49
+ ```
50
+
51
+ - **breaking change**: Renamed the `text` property in the `streamText` result (when `fullResponse: true` is used) to `textPromise`.
52
+
53
+ ### Fixed
54
+
55
+ - Ollama streaming.
56
+ - Ollama structure generation and streaming.
57
+
3
58
  ## v0.126.0 - 2024-01-15
4
59
 
5
60
  ### Changed
package/README.md CHANGED
@@ -184,14 +184,9 @@ const structureStream = await streamStructure({
184
184
  prompt: "Generate 3 character descriptions for a fantasy role playing game.",
185
185
  });
186
186
 
187
- for await (const part of structureStream) {
188
- if (!part.isComplete) {
189
- const unknownPartialStructure = part.value;
190
- console.log("partial value", unknownPartialStructure);
191
- } else {
192
- const fullyTypedStructure = part.value;
193
- console.log("final value", fullyTypedStructure);
194
- }
187
+ for await (const partialStructure of structureStream) {
188
+ console.clear();
189
+ console.log(partialStructure);
195
190
  }
196
191
  ```
197
192
 
@@ -580,8 +575,8 @@ modelfusion.setLogFormat("detailed-object"); // log full events
580
575
  - [Embed Value](https://modelfusion.dev/guide/function/embed)
581
576
  - [Classify Value](https://modelfusion.dev/guide/function/classify)
582
577
  - [Tools](https://modelfusion.dev/guide/tools)
583
- - [Use Tool](https://modelfusion.dev/guide/tools/run-tool)
584
- - [Use Tools](https://modelfusion.dev/guide/tools/run-tools)
578
+ - [Run Tool](https://modelfusion.dev/guide/tools/run-tool)
579
+ - [Run Tools](https://modelfusion.dev/guide/tools/run-tools)
585
580
  - [Agent Loop](https://modelfusion.dev/guide/tools/agent-loop)
586
581
  - [Available Tools](https://modelfusion.dev/guide/tools/available-tools/)
587
582
  - [Custom Tools](https://modelfusion.dev/guide/tools/custom-tools)
@@ -31,7 +31,7 @@ class StructureFromTextGenerationModel {
31
31
  }
32
32
  getModelWithJsonOutput(schema) {
33
33
  if (this.template.withJsonOutput != null) {
34
- return this.template.withJsonOutput?.({
34
+ return this.template.withJsonOutput({
35
35
  model: this.model,
36
36
  schema,
37
37
  });
@@ -40,7 +40,7 @@ class StructureFromTextGenerationModel {
40
40
  }
41
41
  async doGenerateStructure(schema, prompt, options) {
42
42
  const { rawResponse, text } = await (0, generateText_js_1.generateText)({
43
- model: this.model,
43
+ model: this.getModelWithJsonOutput(schema),
44
44
  prompt: this.template.createPrompt(prompt, schema),
45
45
  fullResponse: true,
46
46
  ...options,
@@ -28,7 +28,7 @@ export class StructureFromTextGenerationModel {
28
28
  }
29
29
  getModelWithJsonOutput(schema) {
30
30
  if (this.template.withJsonOutput != null) {
31
- return this.template.withJsonOutput?.({
31
+ return this.template.withJsonOutput({
32
32
  model: this.model,
33
33
  schema,
34
34
  });
@@ -37,7 +37,7 @@ export class StructureFromTextGenerationModel {
37
37
  }
38
38
  async doGenerateStructure(schema, prompt, options) {
39
39
  const { rawResponse, text } = await generateText({
40
- model: this.model,
40
+ model: this.getModelWithJsonOutput(schema),
41
41
  prompt: this.template.createPrompt(prompt, schema),
42
42
  fullResponse: true,
43
43
  ...options,
@@ -11,7 +11,7 @@ class StructureFromTextStreamingModel extends StructureFromTextGenerationModel_j
11
11
  }
12
12
  async doStreamStructure(schema, prompt, options) {
13
13
  const textStream = await (0, streamText_js_1.streamText)({
14
- model: this.model,
14
+ model: this.getModelWithJsonOutput(schema),
15
15
  prompt: this.template.createPrompt(prompt, schema),
16
16
  ...options,
17
17
  });
@@ -8,7 +8,7 @@ export class StructureFromTextStreamingModel extends StructureFromTextGeneration
8
8
  }
9
9
  async doStreamStructure(schema, prompt, options) {
10
10
  const textStream = await streamText({
11
- model: this.model,
11
+ model: this.getModelWithJsonOutput(schema),
12
12
  prompt: this.template.createPrompt(prompt, schema),
13
13
  ...options,
14
14
  });
@@ -10,6 +10,12 @@ async function streamStructure({ model, schema, prompt, fullResponse, ...options
10
10
  : prompt;
11
11
  let accumulatedText = "";
12
12
  let lastStructure;
13
+ let resolveStructure;
14
+ let rejectStructure;
15
+ const structurePromise = new Promise((resolve, reject) => {
16
+ resolveStructure = resolve;
17
+ rejectStructure = reject;
18
+ });
13
19
  const callResponse = await (0, executeStreamCall_js_1.executeStreamCall)({
14
20
  functionType: "stream-structure",
15
21
  input: {
@@ -29,29 +35,25 @@ async function streamStructure({ model, schema, prompt, fullResponse, ...options
29
35
  // only send a new part into the stream when the partial structure has changed:
30
36
  if (!(0, isDeepEqualData_js_1.isDeepEqualData)(lastStructure, latestStructure)) {
31
37
  lastStructure = latestStructure;
32
- return {
33
- isComplete: false,
34
- value: lastStructure,
35
- };
38
+ return lastStructure;
36
39
  }
37
40
  return undefined;
38
41
  },
39
- processFinished: () => {
42
+ onDone: () => {
40
43
  // process the final result (full type validation):
41
44
  const parseResult = schema.validate(lastStructure);
42
- if (!parseResult.success) {
43
- reportError(parseResult.error);
44
- throw parseResult.error;
45
+ if (parseResult.success) {
46
+ resolveStructure(parseResult.data);
47
+ }
48
+ else {
49
+ rejectStructure(parseResult.error);
45
50
  }
46
- return {
47
- isComplete: true,
48
- value: parseResult.data,
49
- };
50
51
  },
51
52
  });
52
53
  return fullResponse
53
54
  ? {
54
55
  structureStream: callResponse.value,
56
+ structurePromise,
55
57
  metadata: callResponse.metadata,
56
58
  }
57
59
  : callResponse.value;
@@ -1,25 +1,12 @@
1
+ import type { PartialDeep } from "type-fest";
1
2
  import { FunctionOptions } from "../../core/FunctionOptions.js";
2
3
  import { JsonSchemaProducer } from "../../core/schema/JsonSchemaProducer.js";
3
4
  import { Schema } from "../../core/schema/Schema.js";
4
5
  import { ModelCallMetadata } from "../ModelCallMetadata.js";
5
6
  import { StructureStreamingModel } from "./StructureGenerationModel.js";
6
- export type StructureStreamPart<STRUCTURE> = {
7
- isComplete: false;
8
- value: unknown;
9
- } | {
10
- isComplete: true;
11
- value: STRUCTURE;
12
- };
13
7
  /**
14
8
  * Generate and stream an object for a prompt and a structure definition.
15
9
  *
16
- * The final object is typed according to the structure definition.
17
- * The partial objects are of unknown type,
18
- * but are supposed to be partial version of the final object
19
- * (unless the underlying model returns invalid data).
20
- *
21
- * The structure definition is used as part of the final prompt.
22
- *
23
10
  * For the OpenAI chat model, this generates and parses a function call with a single function.
24
11
  *
25
12
  * @see https://modelfusion.dev/guide/function/generate-structure
@@ -44,15 +31,8 @@ export type StructureStreamPart<STRUCTURE> = {
44
31
  * ]
45
32
  * });
46
33
  *
47
- * for await (const part of structureStream) {
48
- * if (!part.isComplete) {
49
- * const unknownPartialStructure = part.value;
50
- * // use your own logic to handle partial structures, e.g. with Zod .deepPartial()
51
- * // it depends on your application at which points you want to act on the partial structures
52
- * } else {
53
- * const fullyTypedStructure = part.value;
54
- * // ...
55
- * }
34
+ * for await (const partialStructure of structureStream) {
35
+ * // ...
56
36
  * }
57
37
  *
58
38
  * @param {StructureStreamingModel<PROMPT>} structureGenerator - The model to use for streaming
@@ -60,26 +40,28 @@ export type StructureStreamPart<STRUCTURE> = {
60
40
  * @param {PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT)} prompt
61
41
  * The prompt to be used.
62
42
  * You can also pass a function that takes the schema as an argument and returns the prompt.
63
- * @param {FunctionOptions} [options] - Optional function options
64
43
  *
65
44
  * @returns {AsyncIterableResultPromise<StructureStreamPart<STRUCTURE>>}
66
45
  * The async iterable result promise.
67
- * Each part of the stream is either a partial structure or the final structure.
68
- * It contains a isComplete flag to indicate whether the structure is complete,
69
- * and a value that is either the partial structure or the final structure.
46
+ * Each part of the stream is a partial structure.
70
47
  */
71
48
  export declare function streamStructure<STRUCTURE, PROMPT>(args: {
72
49
  model: StructureStreamingModel<PROMPT>;
73
50
  schema: Schema<STRUCTURE> & JsonSchemaProducer;
74
51
  prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT);
75
52
  fullResponse?: false;
76
- } & FunctionOptions): Promise<AsyncIterable<StructureStreamPart<STRUCTURE>>>;
53
+ } & FunctionOptions): Promise<AsyncIterable<PartialDeep<STRUCTURE, {
54
+ recurseIntoArrays: true;
55
+ }>>>;
77
56
  export declare function streamStructure<STRUCTURE, PROMPT>(args: {
78
57
  model: StructureStreamingModel<PROMPT>;
79
58
  schema: Schema<STRUCTURE> & JsonSchemaProducer;
80
59
  prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT);
81
60
  fullResponse: true;
82
61
  } & FunctionOptions): Promise<{
83
- structureStream: AsyncIterable<StructureStreamPart<STRUCTURE>>;
62
+ structureStream: AsyncIterable<PartialDeep<STRUCTURE, {
63
+ recurseIntoArrays: true;
64
+ }>>;
65
+ structurePromise: PromiseLike<STRUCTURE>;
84
66
  metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
85
67
  }>;
@@ -7,6 +7,12 @@ export async function streamStructure({ model, schema, prompt, fullResponse, ...
7
7
  : prompt;
8
8
  let accumulatedText = "";
9
9
  let lastStructure;
10
+ let resolveStructure;
11
+ let rejectStructure;
12
+ const structurePromise = new Promise((resolve, reject) => {
13
+ resolveStructure = resolve;
14
+ rejectStructure = reject;
15
+ });
10
16
  const callResponse = await executeStreamCall({
11
17
  functionType: "stream-structure",
12
18
  input: {
@@ -26,29 +32,25 @@ export async function streamStructure({ model, schema, prompt, fullResponse, ...
26
32
  // only send a new part into the stream when the partial structure has changed:
27
33
  if (!isDeepEqualData(lastStructure, latestStructure)) {
28
34
  lastStructure = latestStructure;
29
- return {
30
- isComplete: false,
31
- value: lastStructure,
32
- };
35
+ return lastStructure;
33
36
  }
34
37
  return undefined;
35
38
  },
36
- processFinished: () => {
39
+ onDone: () => {
37
40
  // process the final result (full type validation):
38
41
  const parseResult = schema.validate(lastStructure);
39
- if (!parseResult.success) {
40
- reportError(parseResult.error);
41
- throw parseResult.error;
42
+ if (parseResult.success) {
43
+ resolveStructure(parseResult.data);
44
+ }
45
+ else {
46
+ rejectStructure(parseResult.error);
42
47
  }
43
- return {
44
- isComplete: true,
45
- value: parseResult.data,
46
- };
47
48
  },
48
49
  });
49
50
  return fullResponse
50
51
  ? {
51
52
  structureStream: callResponse.value,
53
+ structurePromise,
52
54
  metadata: callResponse.metadata,
53
55
  }
54
56
  : callResponse.value;
@@ -46,7 +46,7 @@ async function streamText({ model, prompt, fullResponse, ...options }) {
46
46
  return fullResponse
47
47
  ? {
48
48
  textStream: callResponse.value,
49
- text: textPromise,
49
+ textPromise,
50
50
  metadata: callResponse.metadata,
51
51
  }
52
52
  : callResponse.value;
@@ -37,6 +37,6 @@ export declare function streamText<PROMPT>(args: {
37
37
  fullResponse: true;
38
38
  } & FunctionOptions): Promise<{
39
39
  textStream: AsyncIterable<string>;
40
- text: PromiseLike<string>;
40
+ textPromise: PromiseLike<string>;
41
41
  metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
42
42
  }>;
@@ -43,7 +43,7 @@ export async function streamText({ model, prompt, fullResponse, ...options }) {
43
43
  return fullResponse
44
44
  ? {
45
45
  textStream: callResponse.value,
46
- text: textPromise,
46
+ textPromise,
47
47
  metadata: callResponse.metadata,
48
48
  }
49
49
  : callResponse.value;
@@ -44,7 +44,7 @@ describe("streamText", () => {
44
44
  ]);
45
45
  });
46
46
  it("should return text", async () => {
47
- const { text } = await (0, streamText_js_1.streamText)({
47
+ const { textPromise } = await (0, streamText_js_1.streamText)({
48
48
  model: new MistralChatModel_js_1.MistralChatModel({
49
49
  api: new MistralApiConfiguration_js_1.MistralApiConfiguration({ apiKey: "test-key" }),
50
50
  model: "mistral-tiny",
@@ -52,7 +52,7 @@ describe("streamText", () => {
52
52
  prompt: "hello",
53
53
  fullResponse: true,
54
54
  });
55
- expect(await text).toStrictEqual("Hello, world!");
55
+ expect(await textPromise).toStrictEqual("Hello, world!");
56
56
  });
57
57
  });
58
58
  });
@@ -42,7 +42,7 @@ describe("streamText", () => {
42
42
  ]);
43
43
  });
44
44
  it("should return text", async () => {
45
- const { text } = await streamText({
45
+ const { textPromise } = await streamText({
46
46
  model: new MistralChatModel({
47
47
  api: new MistralApiConfiguration({ apiKey: "test-key" }),
48
48
  model: "mistral-tiny",
@@ -50,7 +50,7 @@ describe("streamText", () => {
50
50
  prompt: "hello",
51
51
  fullResponse: true,
52
52
  });
53
- expect(await text).toStrictEqual("Hello, world!");
53
+ expect(await textPromise).toStrictEqual("Hello, world!");
54
54
  });
55
55
  });
56
56
  });
@@ -231,7 +231,7 @@ const ollamaChatStreamChunkSchema = zod_1.z.discriminatedUnion("done", [
231
231
  created_at: zod_1.z.string(),
232
232
  total_duration: zod_1.z.number(),
233
233
  load_duration: zod_1.z.number().optional(),
234
- prompt_eval_count: zod_1.z.number(),
234
+ prompt_eval_count: zod_1.z.number().optional(),
235
235
  prompt_eval_duration: zod_1.z.number().optional(),
236
236
  eval_count: zod_1.z.number(),
237
237
  eval_duration: zod_1.z.number(),
@@ -94,10 +94,10 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
94
94
  done: true;
95
95
  created_at: string;
96
96
  total_duration: number;
97
- prompt_eval_count: number;
98
97
  eval_count: number;
99
98
  eval_duration: number;
100
99
  load_duration?: number | undefined;
100
+ prompt_eval_count?: number | undefined;
101
101
  prompt_eval_duration?: number | undefined;
102
102
  }>>>;
103
103
  extractTextDelta(delta: unknown): string | undefined;
@@ -197,7 +197,7 @@ declare const ollamaChatStreamChunkSchema: z.ZodDiscriminatedUnion<"done", [z.Zo
197
197
  created_at: z.ZodString;
198
198
  total_duration: z.ZodNumber;
199
199
  load_duration: z.ZodOptional<z.ZodNumber>;
200
- prompt_eval_count: z.ZodNumber;
200
+ prompt_eval_count: z.ZodOptional<z.ZodNumber>;
201
201
  prompt_eval_duration: z.ZodOptional<z.ZodNumber>;
202
202
  eval_count: z.ZodNumber;
203
203
  eval_duration: z.ZodNumber;
@@ -206,20 +206,20 @@ declare const ollamaChatStreamChunkSchema: z.ZodDiscriminatedUnion<"done", [z.Zo
206
206
  done: true;
207
207
  created_at: string;
208
208
  total_duration: number;
209
- prompt_eval_count: number;
210
209
  eval_count: number;
211
210
  eval_duration: number;
212
211
  load_duration?: number | undefined;
212
+ prompt_eval_count?: number | undefined;
213
213
  prompt_eval_duration?: number | undefined;
214
214
  }, {
215
215
  model: string;
216
216
  done: true;
217
217
  created_at: string;
218
218
  total_duration: number;
219
- prompt_eval_count: number;
220
219
  eval_count: number;
221
220
  eval_duration: number;
222
221
  load_duration?: number | undefined;
222
+ prompt_eval_count?: number | undefined;
223
223
  prompt_eval_duration?: number | undefined;
224
224
  }>]>;
225
225
  export type OllamaChatStreamChunk = z.infer<typeof ollamaChatStreamChunkSchema>;
@@ -274,10 +274,10 @@ export declare const OllamaChatResponseFormat: {
274
274
  done: true;
275
275
  created_at: string;
276
276
  total_duration: number;
277
- prompt_eval_count: number;
278
277
  eval_count: number;
279
278
  eval_duration: number;
280
279
  load_duration?: number | undefined;
280
+ prompt_eval_count?: number | undefined;
281
281
  prompt_eval_duration?: number | undefined;
282
282
  }>>>;
283
283
  };
@@ -227,7 +227,7 @@ const ollamaChatStreamChunkSchema = z.discriminatedUnion("done", [
227
227
  created_at: z.string(),
228
228
  total_duration: z.number(),
229
229
  load_duration: z.number().optional(),
230
- prompt_eval_count: z.number(),
230
+ prompt_eval_count: z.number().optional(),
231
231
  prompt_eval_duration: z.number().optional(),
232
232
  eval_count: z.number(),
233
233
  eval_duration: z.number(),
@@ -180,7 +180,7 @@ class OllamaCompletionModel extends AbstractModel_js_1.AbstractModel {
180
180
  return this.settings.promptTemplate ?? OllamaCompletionPrompt_js_1.Text;
181
181
  }
182
182
  withJsonOutput() {
183
- return this;
183
+ return this.withSettings({ format: "json" });
184
184
  }
185
185
  withTextPrompt() {
186
186
  return this.withPromptTemplate(this.promptTemplateProvider.text());
@@ -235,7 +235,7 @@ const ollamaCompletionStreamChunkSchema = zod_1.z.discriminatedUnion("done", [
235
235
  load_duration: zod_1.z.number().optional(),
236
236
  sample_count: zod_1.z.number().optional(),
237
237
  sample_duration: zod_1.z.number().optional(),
238
- prompt_eval_count: zod_1.z.number(),
238
+ prompt_eval_count: zod_1.z.number().optional(),
239
239
  prompt_eval_duration: zod_1.z.number().optional(),
240
240
  eval_count: zod_1.z.number(),
241
241
  eval_duration: zod_1.z.number(),
@@ -127,12 +127,12 @@ export declare class OllamaCompletionModel<CONTEXT_WINDOW_SIZE extends number |
127
127
  done: true;
128
128
  created_at: string;
129
129
  total_duration: number;
130
- prompt_eval_count: number;
131
130
  eval_count: number;
132
131
  eval_duration: number;
133
132
  load_duration?: number | undefined;
134
133
  sample_count?: number | undefined;
135
134
  sample_duration?: number | undefined;
135
+ prompt_eval_count?: number | undefined;
136
136
  prompt_eval_duration?: number | undefined;
137
137
  context?: number[] | undefined;
138
138
  }>>>;
@@ -209,7 +209,7 @@ declare const ollamaCompletionStreamChunkSchema: z.ZodDiscriminatedUnion<"done",
209
209
  load_duration: z.ZodOptional<z.ZodNumber>;
210
210
  sample_count: z.ZodOptional<z.ZodNumber>;
211
211
  sample_duration: z.ZodOptional<z.ZodNumber>;
212
- prompt_eval_count: z.ZodNumber;
212
+ prompt_eval_count: z.ZodOptional<z.ZodNumber>;
213
213
  prompt_eval_duration: z.ZodOptional<z.ZodNumber>;
214
214
  eval_count: z.ZodNumber;
215
215
  eval_duration: z.ZodNumber;
@@ -219,12 +219,12 @@ declare const ollamaCompletionStreamChunkSchema: z.ZodDiscriminatedUnion<"done",
219
219
  done: true;
220
220
  created_at: string;
221
221
  total_duration: number;
222
- prompt_eval_count: number;
223
222
  eval_count: number;
224
223
  eval_duration: number;
225
224
  load_duration?: number | undefined;
226
225
  sample_count?: number | undefined;
227
226
  sample_duration?: number | undefined;
227
+ prompt_eval_count?: number | undefined;
228
228
  prompt_eval_duration?: number | undefined;
229
229
  context?: number[] | undefined;
230
230
  }, {
@@ -232,12 +232,12 @@ declare const ollamaCompletionStreamChunkSchema: z.ZodDiscriminatedUnion<"done",
232
232
  done: true;
233
233
  created_at: string;
234
234
  total_duration: number;
235
- prompt_eval_count: number;
236
235
  eval_count: number;
237
236
  eval_duration: number;
238
237
  load_duration?: number | undefined;
239
238
  sample_count?: number | undefined;
240
239
  sample_duration?: number | undefined;
240
+ prompt_eval_count?: number | undefined;
241
241
  prompt_eval_duration?: number | undefined;
242
242
  context?: number[] | undefined;
243
243
  }>]>;
@@ -288,12 +288,12 @@ export declare const OllamaCompletionResponseFormat: {
288
288
  done: true;
289
289
  created_at: string;
290
290
  total_duration: number;
291
- prompt_eval_count: number;
292
291
  eval_count: number;
293
292
  eval_duration: number;
294
293
  load_duration?: number | undefined;
295
294
  sample_count?: number | undefined;
296
295
  sample_duration?: number | undefined;
296
+ prompt_eval_count?: number | undefined;
297
297
  prompt_eval_duration?: number | undefined;
298
298
  context?: number[] | undefined;
299
299
  }>>>;
@@ -177,7 +177,7 @@ export class OllamaCompletionModel extends AbstractModel {
177
177
  return this.settings.promptTemplate ?? Text;
178
178
  }
179
179
  withJsonOutput() {
180
- return this;
180
+ return this.withSettings({ format: "json" });
181
181
  }
182
182
  withTextPrompt() {
183
183
  return this.withPromptTemplate(this.promptTemplateProvider.text());
@@ -231,7 +231,7 @@ const ollamaCompletionStreamChunkSchema = z.discriminatedUnion("done", [
231
231
  load_duration: z.number().optional(),
232
232
  sample_count: z.number().optional(),
233
233
  sample_duration: z.number().optional(),
234
- prompt_eval_count: z.number(),
234
+ prompt_eval_count: z.number().optional(),
235
235
  prompt_eval_duration: z.number().optional(),
236
236
  eval_count: z.number(),
237
237
  eval_duration: z.number(),
@@ -124,13 +124,11 @@ describe("streamStructure", () => {
124
124
  schema: (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })),
125
125
  prompt: "generate a name",
126
126
  });
127
- // note: space moved to last chunk bc of trimming
128
127
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
129
- { isComplete: false, value: {} },
130
- { isComplete: false, value: { name: "" } },
131
- { isComplete: false, value: { name: "M" } },
132
- { isComplete: false, value: { name: "Mike" } },
133
- { isComplete: true, value: { name: "Mike" } },
128
+ {},
129
+ { name: "" },
130
+ { name: "M" },
131
+ { name: "Mike" },
134
132
  ]);
135
133
  });
136
134
  });
@@ -122,13 +122,11 @@ describe("streamStructure", () => {
122
122
  schema: zodSchema(z.object({ name: z.string() })),
123
123
  prompt: "generate a name",
124
124
  });
125
- // note: space moved to last chunk bc of trimming
126
125
  expect(await arrayFromAsync(stream)).toStrictEqual([
127
- { isComplete: false, value: {} },
128
- { isComplete: false, value: { name: "" } },
129
- { isComplete: false, value: { name: "M" } },
130
- { isComplete: false, value: { name: "Mike" } },
131
- { isComplete: true, value: { name: "Mike" } },
126
+ {},
127
+ { name: "" },
128
+ { name: "M" },
129
+ { name: "Mike" },
132
130
  ]);
133
131
  });
134
132
  });
@@ -89,13 +89,11 @@ describe("streamStructure", () => {
89
89
  schema: (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })),
90
90
  prompt: "generate a name",
91
91
  });
92
- // note: space moved to last chunk bc of trimming
93
92
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
94
- { isComplete: false, value: {} },
95
- { isComplete: false, value: { name: "" } },
96
- { isComplete: false, value: { name: "M" } },
97
- { isComplete: false, value: { name: "Mike" } },
98
- { isComplete: true, value: { name: "Mike" } },
93
+ {},
94
+ { name: "" },
95
+ { name: "M" },
96
+ { name: "Mike" },
99
97
  ]);
100
98
  });
101
99
  });
@@ -87,13 +87,11 @@ describe("streamStructure", () => {
87
87
  schema: zodSchema(z.object({ name: z.string() })),
88
88
  prompt: "generate a name",
89
89
  });
90
- // note: space moved to last chunk bc of trimming
91
90
  expect(await arrayFromAsync(stream)).toStrictEqual([
92
- { isComplete: false, value: {} },
93
- { isComplete: false, value: { name: "" } },
94
- { isComplete: false, value: { name: "M" } },
95
- { isComplete: false, value: { name: "Mike" } },
96
- { isComplete: true, value: { name: "Mike" } },
91
+ {},
92
+ { name: "" },
93
+ { name: "M" },
94
+ { name: "Mike" },
97
95
  ]);
98
96
  });
99
97
  });
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "modelfusion",
3
3
  "description": "The TypeScript library for building AI applications.",
4
- "version": "0.126.0",
4
+ "version": "0.127.0",
5
5
  "author": "Lars Grammel",
6
6
  "license": "MIT",
7
7
  "keywords": [
@@ -80,6 +80,7 @@
80
80
  "@vitest/ui": "1.1.0",
81
81
  "eslint": "^8.45.0",
82
82
  "eslint-config-prettier": "9.1.0",
83
- "msw": "2.0.11"
83
+ "msw": "2.0.11",
84
+ "type-fest": "4.9.0"
84
85
  }
85
86
  }