modelfusion 0.96.0 → 0.98.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/README.md +11 -4
  2. package/model-function/embed/embed.cjs +14 -2
  3. package/model-function/embed/embed.d.ts +6 -6
  4. package/model-function/embed/embed.js +14 -2
  5. package/model-function/generate-image/generateImage.cjs +10 -9
  6. package/model-function/generate-image/generateImage.d.ts +4 -6
  7. package/model-function/generate-image/generateImage.js +10 -9
  8. package/model-function/generate-speech/generateSpeech.cjs +7 -1
  9. package/model-function/generate-speech/generateSpeech.d.ts +3 -3
  10. package/model-function/generate-speech/generateSpeech.js +7 -1
  11. package/model-function/generate-speech/streamSpeech.cjs +6 -1
  12. package/model-function/generate-speech/streamSpeech.d.ts +3 -3
  13. package/model-function/generate-speech/streamSpeech.js +6 -1
  14. package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +5 -5
  15. package/model-function/generate-structure/StructureFromTextGenerationModel.js +5 -5
  16. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +5 -5
  17. package/model-function/generate-structure/StructureFromTextStreamingModel.js +5 -5
  18. package/model-function/generate-structure/generateStructure.cjs +7 -1
  19. package/model-function/generate-structure/generateStructure.d.ts +3 -3
  20. package/model-function/generate-structure/generateStructure.js +7 -1
  21. package/model-function/generate-structure/streamStructure.cjs +6 -1
  22. package/model-function/generate-structure/streamStructure.d.ts +3 -3
  23. package/model-function/generate-structure/streamStructure.js +6 -1
  24. package/model-function/generate-text/generateText.cjs +7 -1
  25. package/model-function/generate-text/generateText.d.ts +3 -3
  26. package/model-function/generate-text/generateText.js +7 -1
  27. package/model-function/generate-text/streamText.cjs +6 -1
  28. package/model-function/generate-text/streamText.d.ts +3 -3
  29. package/model-function/generate-text/streamText.js +6 -1
  30. package/model-function/generate-transcription/generateTranscription.cjs +1 -1
  31. package/model-function/generate-transcription/generateTranscription.d.ts +2 -2
  32. package/model-function/generate-transcription/generateTranscription.js +1 -1
  33. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
  34. package/model-provider/ollama/OllamaTextGenerationModel.cjs +60 -57
  35. package/model-provider/ollama/OllamaTextGenerationModel.d.ts +33 -22
  36. package/model-provider/ollama/OllamaTextGenerationModel.js +60 -57
  37. package/model-provider/ollama/OllamaTextGenerationModel.test.cjs +2 -2
  38. package/model-provider/ollama/OllamaTextGenerationModel.test.js +2 -2
  39. package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +1 -1
  40. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +49 -0
  41. package/model-provider/openai/chat/AbstractOpenAIChatModel.js +1 -1
  42. package/model-provider/openai/chat/OpenAIChatModel.test.cjs +61 -0
  43. package/model-provider/openai/chat/OpenAIChatModel.test.d.ts +1 -0
  44. package/model-provider/openai/chat/OpenAIChatModel.test.js +59 -0
  45. package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +8 -3
  46. package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +1 -1
  47. package/model-provider/openai/chat/OpenAIChatStreamIterable.js +8 -3
  48. package/package.json +1 -1
  49. package/tool/execute-tool/executeTool.cjs +1 -1
  50. package/tool/execute-tool/executeTool.d.ts +2 -2
  51. package/tool/execute-tool/executeTool.js +1 -1
  52. package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +4 -4
  53. package/tool/generate-tool-call/TextGenerationToolCallModel.js +4 -4
  54. package/tool/generate-tool-call/generateToolCall.cjs +7 -1
  55. package/tool/generate-tool-call/generateToolCall.d.ts +3 -3
  56. package/tool/generate-tool-call/generateToolCall.js +7 -1
  57. package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs +4 -4
  58. package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js +4 -4
  59. package/tool/generate-tool-calls-or-text/generateToolCallsOrText.cjs +1 -1
  60. package/tool/generate-tool-calls-or-text/generateToolCallsOrText.d.ts +2 -2
  61. package/tool/generate-tool-calls-or-text/generateToolCallsOrText.js +1 -1
  62. package/tool/use-tools-or-generate-text/useToolsOrGenerateText.cjs +1 -1
  63. package/tool/use-tools-or-generate-text/useToolsOrGenerateText.js +1 -1
package/README.md CHANGED
@@ -85,7 +85,10 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
 Multi-modal vision models such as GPT 4 Vision can process images as part of the prompt.
 
 ```ts
-import { streamText, openai } from "modelfusion";
+import { streamText, openai, OpenAIChatMessage } from "modelfusion";
+import { readFileSync } from "fs";
+
+const image = readFileSync("./image.png").toString("base64");
 
 const textStream = await streamText(
   openai.ChatTextGenerator({ model: "gpt-4-vision-preview" }),
@@ -96,9 +99,13 @@ const textStream = await streamText(
     ]),
   ]
 );
+
+for await (const textPart of textStream) {
+  process.stdout.write(textPart);
+}
 ```
 
-Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama)
 
 ### [Generate Image](https://modelfusion.dev/guide/function/generate-image)
 
@@ -608,7 +615,7 @@ const image = await generateImage(
 
 ### Metadata and original responses
 
-ModelFusion model functions return rich results that include the original response and metadata when you set the `returnType` option to `full`.
+ModelFusion model functions return rich responses that include the original response and metadata when you set the `fullResponse` option to `true`.
 
 ```ts
 // access the full response (needs to be typed) and the metadata:
@@ -619,7 +626,7 @@ const { value, response, metadata } = await generateText(
     n: 2, // generate 2 completions
   }),
   "Write a short story about a robot learning to love:\n\n",
-  { returnType: "full" }
+  { fullResponse: true }
 );
 
 console.log(metadata);
package/model-function/embed/embed.cjs CHANGED
@@ -43,7 +43,13 @@ async function embedMany(model, values, options) {
             };
         },
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            embeddings: fullResponse.value,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
 exports.embedMany = embedMany;
 async function embed(model, value, options) {
@@ -60,6 +66,12 @@ async function embed(model, value, options) {
             };
         },
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            embedding: fullResponse.value,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
 exports.embed = embed;
package/model-function/embed/embed.d.ts CHANGED
@@ -23,12 +23,12 @@ import { EmbeddingModel, EmbeddingModelSettings } from "./EmbeddingModel.js";
  * @returns {Promise<Vector[]>} - A promise that resolves to an array of vectors representing the embeddings.
  */
 export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, values: VALUE[], options?: FunctionOptions & {
-    returnType?: "vectors";
+    fullResponse?: false;
 }): Promise<Vector[]>;
 export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, values: VALUE[], options: FunctionOptions & {
-    returnType: "full";
+    fullResponse: true;
 }): Promise<{
-    value: Vector[];
+    embeddings: Vector[];
     response: unknown;
     metadata: ModelCallMetadata;
 }>;
@@ -50,12 +50,12 @@ export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingM
  * @returns {Promise<Vector>} - A promise that resolves to a vector representing the embedding.
  */
 export declare function embed<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, value: VALUE, options?: FunctionOptions & {
-    returnType?: "vector";
+    fullResponse?: false;
 }): Promise<Vector>;
 export declare function embed<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, value: VALUE, options: FunctionOptions & {
-    returnType: "full";
+    fullResponse: true;
 }): Promise<{
-    value: Vector;
+    embedding: Vector;
     response: unknown;
     metadata: ModelCallMetadata;
 }>;
package/model-function/embed/embed.js CHANGED
@@ -40,7 +40,13 @@ export async function embedMany(model, values, options) {
             };
         },
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            embeddings: fullResponse.value,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
 export async function embed(model, value, options) {
     const fullResponse = await executeStandardCall({
@@ -56,5 +62,11 @@ export async function embed(model, value, options) {
             };
         },
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            embedding: fullResponse.value,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
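The declarations above define the new `embed`/`embedMany` API. As a rough usage sketch (not taken from the package; the model instance is a placeholder typed from the function signature):

```ts
import { embed, embedMany } from "modelfusion";

// Placeholder: any embedding model instance (e.g. Cohere or OpenAI),
// typed from the embed signature so nothing beyond this diff is assumed.
declare const embeddingModel: Parameters<typeof embed>[0];

// Default call: resolves to the plain vector(s), as before.
const embedding = await embed(embeddingModel, "embed this sentence");

// 0.98.0: `{ fullResponse: true }` replaces `{ returnType: "full" }`, and the
// vectors are returned under the named `embedding` / `embeddings` fields.
const { embeddings, response, metadata } = await embedMany(
  embeddingModel,
  ["first sentence", "second sentence"],
  { fullResponse: true }
);
```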
package/model-function/generate-image/generateImage.cjs CHANGED
@@ -16,14 +16,15 @@ async function generateImage(model, prompt, options) {
             };
         },
     });
-    switch (options?.returnType) {
-        case "full":
-            return fullResponse;
-        case "base64":
-            return fullResponse.value;
-        case "buffer":
-        default:
-            return Buffer.from(fullResponse.value, "base64");
-    }
+    const imageBase64 = fullResponse.value;
+    const image = Buffer.from(imageBase64, "base64");
+    return options?.fullResponse
+        ? {
+            image,
+            imageBase64,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : image;
 }
 exports.generateImage = generateImage;
package/model-function/generate-image/generateImage.d.ts CHANGED
@@ -27,15 +27,13 @@ import { ImageGenerationModel, ImageGenerationModelSettings } from "./ImageGener
  * The image is a Buffer containing the image data in PNG format.
  */
 export declare function generateImage<PROMPT>(model: ImageGenerationModel<PROMPT, ImageGenerationModelSettings>, prompt: PROMPT, options?: FunctionOptions & {
-    returnType?: "buffer";
+    fullResponse?: false;
 }): Promise<Buffer>;
 export declare function generateImage<PROMPT>(model: ImageGenerationModel<PROMPT, ImageGenerationModelSettings>, prompt: PROMPT, options: FunctionOptions & {
-    returnType: "base64";
-}): Promise<string>;
-export declare function generateImage<PROMPT>(model: ImageGenerationModel<PROMPT, ImageGenerationModelSettings>, prompt: PROMPT, options: FunctionOptions & {
-    returnType: "full";
+    fullResponse: true;
 }): Promise<{
-    value: string;
+    image: Buffer;
+    imageBase64: string;
     response: unknown;
     metadata: ModelCallMetadata;
 }>;
package/model-function/generate-image/generateImage.js CHANGED
@@ -13,13 +13,14 @@ export async function generateImage(model, prompt, options) {
             };
         },
     });
-    switch (options?.returnType) {
-        case "full":
-            return fullResponse;
-        case "base64":
-            return fullResponse.value;
-        case "buffer":
-        default:
-            return Buffer.from(fullResponse.value, "base64");
-    }
+    const imageBase64 = fullResponse.value;
+    const image = Buffer.from(imageBase64, "base64");
+    return options?.fullResponse
+        ? {
+            image,
+            imageBase64,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : image;
 }
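A hedged sketch of the reworked `generateImage` result: the `returnType: "base64"` variant is gone, and the full response now exposes both the decoded `image` Buffer and the `imageBase64` string. Model and prompt below are placeholders:

```ts
import { writeFileSync } from "fs";
import { generateImage } from "modelfusion";

// Placeholder: any image generation model instance, typed from the signature.
declare const imageModel: Parameters<typeof generateImage>[0];

// Default call: resolves to a Buffer with PNG data, unchanged.
const image = await generateImage(imageModel, "a painting of a lighthouse");

// Full response: base64 is no longer a separate returnType; it is a field.
const { image: buffer, imageBase64, response, metadata } = await generateImage(
  imageModel,
  "a painting of a lighthouse",
  { fullResponse: true }
);

writeFileSync("lighthouse.png", buffer);
```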
package/model-function/generate-speech/generateSpeech.cjs CHANGED
@@ -16,6 +16,12 @@ async function generateSpeech(model, text, options) {
             };
         },
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            speech: fullResponse.value,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
 exports.generateSpeech = generateSpeech;
package/model-function/generate-speech/generateSpeech.d.ts CHANGED
@@ -21,12 +21,12 @@ import { SpeechGenerationModel, SpeechGenerationModelSettings } from "./SpeechGe
  * @returns {Promise<Buffer>} - A promise that resolves to a buffer containing the synthesized speech.
  */
 export declare function generateSpeech(model: SpeechGenerationModel<SpeechGenerationModelSettings>, text: string, options?: FunctionOptions & {
-    returnType?: "buffer";
+    fullResponse?: false;
 }): Promise<Buffer>;
 export declare function generateSpeech(model: SpeechGenerationModel<SpeechGenerationModelSettings>, text: string, options: FunctionOptions & {
-    returnType: "full";
+    fullResponse: true;
 }): Promise<{
-    value: Buffer;
+    speech: Buffer;
     response: unknown;
     metadata: ModelCallMetadata;
 }>;
package/model-function/generate-speech/generateSpeech.js CHANGED
@@ -13,5 +13,11 @@ export async function generateSpeech(model, text, options) {
             };
         },
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            speech: fullResponse.value,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
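A minimal sketch of the new `generateSpeech` result shape; the model instance is a placeholder and the output file name is illustrative (the audio format depends on the provider):

```ts
import { writeFileSync } from "fs";
import { generateSpeech } from "modelfusion";

// Placeholder: any speech generation model instance.
declare const speechModel: Parameters<typeof generateSpeech>[0];

// Default call: a Buffer with the synthesized audio, unchanged.
const speech = await generateSpeech(speechModel, "Hello from modelfusion 0.98!");
writeFileSync("hello.mp3", speech); // extension is illustrative only

// Full response: the audio now lives under the named `speech` field.
const { speech: audio, response, metadata } = await generateSpeech(
  speechModel,
  "Hello from modelfusion 0.98!",
  { fullResponse: true }
);
```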
package/model-function/generate-speech/streamSpeech.cjs CHANGED
@@ -24,6 +24,11 @@ async function streamSpeech(model, text, options) {
         processDelta: (delta) => delta.valueDelta,
         getResult: () => ({}),
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            speechStream: fullResponse.value,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
 exports.streamSpeech = streamSpeech;
package/model-function/generate-speech/streamSpeech.d.ts CHANGED
@@ -27,11 +27,11 @@ import { SpeechGenerationModelSettings, StreamingSpeechGenerationModel } from ".
  * @returns {AsyncIterableResultPromise<Buffer>} An async iterable promise that contains the synthesized speech chunks.
  */
 export declare function streamSpeech(model: StreamingSpeechGenerationModel<SpeechGenerationModelSettings>, text: AsyncIterable<string> | string, options?: FunctionOptions & {
-    returnType?: "buffer-stream";
+    fullResponse?: false;
 }): Promise<AsyncIterable<Buffer>>;
 export declare function streamSpeech(model: StreamingSpeechGenerationModel<SpeechGenerationModelSettings>, text: AsyncIterable<string> | string, options: FunctionOptions & {
-    returnType: "full";
+    fullResponse: true;
 }): Promise<{
-    value: AsyncIterable<Buffer>;
+    speechStream: AsyncIterable<Buffer>;
     metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
 }>;
package/model-function/generate-speech/streamSpeech.js CHANGED
@@ -21,5 +21,10 @@ export async function streamSpeech(model, text, options) {
         processDelta: (delta) => delta.valueDelta,
         getResult: () => ({}),
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            speechStream: fullResponse.value,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
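A minimal sketch of consuming the renamed `speechStream` field; the model instance is a placeholder, and note that the streaming full response carries `metadata` but no `response`:

```ts
import { streamSpeech } from "modelfusion";

// Placeholder: any streaming speech generation model instance.
declare const speechModel: Parameters<typeof streamSpeech>[0];

// Full response: the iterable of audio chunks moves to `speechStream`.
const { speechStream, metadata } = await streamSpeech(
  speechModel,
  "A longer text to synthesize...",
  { fullResponse: true }
);

// Collect the Buffer chunks into a single audio buffer.
const parts: Buffer[] = [];
for await (const chunk of speechStream) {
  parts.push(chunk);
}
const audio = Buffer.concat(parts);
```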
package/model-function/generate-structure/StructureFromTextGenerationModel.cjs CHANGED
@@ -30,20 +30,20 @@ class StructureFromTextGenerationModel {
         return this.model.settingsForEvent;
     }
     async doGenerateStructure(schema, prompt, options) {
-        const { response, value } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
+        const { response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
             ...options,
-            returnType: "full",
+            fullResponse: true,
         });
         try {
             return {
                 response,
-                value: this.template.extractStructure(value),
-                valueText: value,
+                value: this.template.extractStructure(text),
+                valueText: text,
             };
         }
         catch (error) {
             throw new StructureParseError_js_1.StructureParseError({
-                valueText: value,
+                valueText: text,
                 cause: error,
             });
         }
package/model-function/generate-structure/StructureFromTextGenerationModel.js CHANGED
@@ -27,20 +27,20 @@ export class StructureFromTextGenerationModel {
         return this.model.settingsForEvent;
     }
     async doGenerateStructure(schema, prompt, options) {
-        const { response, value } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
+        const { response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
             ...options,
-            returnType: "full",
+            fullResponse: true,
        });
         try {
             return {
                 response,
-                value: this.template.extractStructure(value),
-                valueText: value,
+                value: this.template.extractStructure(text),
+                valueText: text,
             };
         }
         catch (error) {
             throw new StructureParseError({
-                valueText: value,
+                valueText: text,
                 cause: error,
             });
         }
package/model-function/generate-structure/StructureFromTextStreamingModel.cjs CHANGED
@@ -41,20 +41,20 @@ class StructureFromTextStreamingModel extends StructureFromTextGenerationModel_j
         return queue;
     }
     async doGenerateStructure(schema, prompt, options) {
-        const { response, value } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
+        const { response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
             ...options,
-            returnType: "full",
+            fullResponse: true,
         });
         try {
             return {
                 response,
-                value: this.template.extractStructure(value),
-                valueText: value,
+                value: this.template.extractStructure(text),
+                valueText: text,
             };
         }
         catch (error) {
             throw new StructureParseError_js_1.StructureParseError({
-                valueText: value,
+                valueText: text,
                 cause: error,
             });
         }
package/model-function/generate-structure/StructureFromTextStreamingModel.js CHANGED
@@ -38,20 +38,20 @@ export class StructureFromTextStreamingModel extends StructureFromTextGeneration
         return queue;
     }
     async doGenerateStructure(schema, prompt, options) {
-        const { response, value } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
+        const { response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
             ...options,
-            returnType: "full",
+            fullResponse: true,
        });
         try {
             return {
                 response,
-                value: this.template.extractStructure(value),
-                valueText: value,
+                value: this.template.extractStructure(text),
+                valueText: text,
            };
         }
         catch (error) {
             throw new StructureParseError({
-                valueText: value,
+                valueText: text,
                 cause: error,
             });
         }
package/model-function/generate-structure/generateStructure.cjs CHANGED
@@ -32,6 +32,12 @@ async function generateStructure(model, schema, prompt, options) {
             };
         },
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            structure: fullResponse.value,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
 exports.generateStructure = generateStructure;
package/model-function/generate-structure/generateStructure.d.ts CHANGED
@@ -38,12 +38,12 @@ import { StructureGenerationModel, StructureGenerationModelSettings } from "./St
  * @returns {Promise<STRUCTURE>} - Returns a promise that resolves to the generated structure.
  */
 export declare function generateStructure<STRUCTURE, PROMPT, SETTINGS extends StructureGenerationModelSettings>(model: StructureGenerationModel<PROMPT, SETTINGS>, schema: Schema<STRUCTURE> & JsonSchemaProducer, prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT), options?: FunctionOptions & {
-    returnType?: "structure";
+    fullResponse?: false;
 }): Promise<STRUCTURE>;
 export declare function generateStructure<STRUCTURE, PROMPT, SETTINGS extends StructureGenerationModelSettings>(model: StructureGenerationModel<PROMPT, SETTINGS>, schema: Schema<STRUCTURE> & JsonSchemaProducer, prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT), options: FunctionOptions & {
-    returnType: "full";
+    fullResponse: true;
 }): Promise<{
-    value: STRUCTURE;
+    structure: STRUCTURE;
     response: unknown;
     metadata: ModelCallMetadata;
 }>;
package/model-function/generate-structure/generateStructure.js CHANGED
@@ -29,5 +29,11 @@ export async function generateStructure(model, schema, prompt, options) {
             };
         },
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            structure: fullResponse.value,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
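A minimal sketch of the renamed `structure` field, with the model and schema left as placeholders typed from the `generateStructure` signature:

```ts
import { generateStructure } from "modelfusion";

// Placeholders: any structure generation model plus a matching schema
// (e.g. one derived from a Zod schema elsewhere in your code).
declare const structureModel: Parameters<typeof generateStructure>[0];
declare const sentimentSchema: Parameters<typeof generateStructure>[1];

// Default call: resolves directly to the generated structure.
const structure = await generateStructure(
  structureModel,
  sentimentSchema,
  "Classify the sentiment of: 'This release is great!'"
);

// Full response: the result is returned under the named `structure` field.
const { structure: sentiment, response, metadata } = await generateStructure(
  structureModel,
  sentimentSchema,
  "Classify the sentiment of: 'This release is great!'",
  { fullResponse: true }
);
```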
package/model-function/generate-structure/streamStructure.cjs CHANGED
@@ -47,6 +47,11 @@ async function streamStructure(model, schema, prompt, options) {
             value: lastStructure,
         }),
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            structureStream: fullResponse.value,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
 exports.streamStructure = streamStructure;
package/model-function/generate-structure/streamStructure.d.ts CHANGED
@@ -69,11 +69,11 @@ export type StructureStreamPart<STRUCTURE> = {
  * and a value that is either the partial structure or the final structure.
  */
 export declare function streamStructure<STRUCTURE, PROMPT>(model: StructureStreamingModel<PROMPT>, schema: Schema<STRUCTURE> & JsonSchemaProducer, prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT), options?: FunctionOptions & {
-    returnType?: "structure-stream";
+    fullResponse?: false;
 }): Promise<AsyncIterable<StructureStreamPart<STRUCTURE>>>;
 export declare function streamStructure<STRUCTURE, PROMPT>(model: StructureStreamingModel<PROMPT>, schema: Schema<STRUCTURE> & JsonSchemaProducer, prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT), options: FunctionOptions & {
-    returnType: "full";
+    fullResponse: true;
 }): Promise<{
-    value: AsyncIterable<StructureStreamPart<STRUCTURE>>;
+    structureStream: AsyncIterable<StructureStreamPart<STRUCTURE>>;
     metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
 }>;
package/model-function/generate-structure/streamStructure.js CHANGED
@@ -44,5 +44,10 @@ export async function streamStructure(model, schema, prompt, options) {
             value: lastStructure,
         }),
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            structureStream: fullResponse.value,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
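A minimal sketch of consuming the renamed `structureStream` field; model and schema are again placeholders typed from the signature:

```ts
import { streamStructure } from "modelfusion";

// Placeholders typed from the streamStructure signature.
declare const structureModel: Parameters<typeof streamStructure>[0];
declare const itinerarySchema: Parameters<typeof streamStructure>[1];

// Full response: the iterable of stream parts is now `structureStream`.
const { structureStream, metadata } = await streamStructure(
  structureModel,
  itinerarySchema,
  "Plan a three-day trip to Lisbon:",
  { fullResponse: true }
);

for await (const part of structureStream) {
  // Each part is a StructureStreamPart carrying a partial or final structure.
  console.log(part);
}
```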
package/model-function/generate-text/generateText.cjs CHANGED
@@ -18,6 +18,12 @@ async function generateText(model, prompt, options) {
             };
         },
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            text: fullResponse.value,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
 exports.generateText = generateText;
package/model-function/generate-text/generateText.d.ts CHANGED
@@ -23,12 +23,12 @@ import { TextGenerationModel, TextGenerationModelSettings } from "./TextGenerati
  * @returns {Promise<string>} - A promise that resolves to the generated text.
  */
 export declare function generateText<PROMPT>(model: TextGenerationModel<PROMPT, TextGenerationModelSettings>, prompt: PROMPT, options?: FunctionOptions & {
-    returnType?: "text";
+    fullResponse?: false;
 }): Promise<string>;
 export declare function generateText<PROMPT>(model: TextGenerationModel<PROMPT, TextGenerationModelSettings>, prompt: PROMPT, options: FunctionOptions & {
-    returnType: "full";
+    fullResponse: true;
 }): Promise<{
-    value: string;
+    text: string;
     response: unknown;
     metadata: ModelCallMetadata;
 }>;
package/model-function/generate-text/generateText.js CHANGED
@@ -15,5 +15,11 @@ export async function generateText(model, prompt, options) {
             };
         },
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            text: fullResponse.value,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
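A minimal sketch of the renamed `text` field; the model instance is a placeholder and the prompt reuses the README example:

```ts
import { generateText } from "modelfusion";

// Placeholder: any text generation model instance (OpenAI, Ollama, Llama.cpp, ...).
declare const textModel: Parameters<typeof generateText>[0];

// Default call: resolves to the generated string, unchanged.
const text = await generateText(textModel, "Write a haiku about version bumps:\n\n");

// Full response: `{ returnType: "full" }` becomes `{ fullResponse: true }`,
// and the generated string moves from `value` to the named `text` field.
const { text: story, response, metadata } = await generateText(
  textModel,
  "Write a short story about a robot learning to love:\n\n",
  { fullResponse: true }
);

console.log(metadata);
```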
package/model-function/generate-text/streamText.cjs CHANGED
@@ -45,6 +45,11 @@ async function streamText(model, prompt, options) {
             value: accumulatedText,
         }),
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            textStream: fullResponse.value,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
 exports.streamText = streamText;
package/model-function/generate-text/streamText.d.ts CHANGED
@@ -27,11 +27,11 @@ import { TextStreamingModel } from "./TextGenerationModel.js";
  * @returns {AsyncIterableResultPromise<string>} An async iterable promise that yields the generated text.
  */
 export declare function streamText<PROMPT>(model: TextStreamingModel<PROMPT>, prompt: PROMPT, options?: FunctionOptions & {
-    returnType?: "text-stream";
+    fullResponse?: false;
 }): Promise<AsyncIterable<string>>;
 export declare function streamText<PROMPT>(model: TextStreamingModel<PROMPT>, prompt: PROMPT, options: FunctionOptions & {
-    returnType: "full";
+    fullResponse: true;
 }): Promise<{
-    value: AsyncIterable<string>;
+    textStream: AsyncIterable<string>;
     metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
 }>;
package/model-function/generate-text/streamText.js CHANGED
@@ -42,5 +42,10 @@ export async function streamText(model, prompt, options) {
             value: accumulatedText,
         }),
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse
+        ? {
+            textStream: fullResponse.value,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
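A minimal sketch of the renamed `textStream` field, with a placeholder model instance:

```ts
import { streamText } from "modelfusion";

// Placeholder: any streaming text generation model instance.
declare const textModel: Parameters<typeof streamText>[0];

// Default call: an AsyncIterable<string>, unchanged.
const textStream = await streamText(textModel, "Tell me a short story:\n\n");
for await (const textPart of textStream) {
  process.stdout.write(textPart);
}

// Full response: the iterable is now returned as the named `textStream` field,
// alongside `metadata` (streaming calls have no `response` field).
const { textStream: stream, metadata } = await streamText(
  textModel,
  "Tell me a short story:\n\n",
  { fullResponse: true }
);
```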
package/model-function/generate-transcription/generateTranscription.cjs CHANGED
@@ -16,6 +16,6 @@ async function generateTranscription(model, data, options) {
             };
         },
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse ? fullResponse : fullResponse.value;
 }
 exports.generateTranscription = generateTranscription;
package/model-function/generate-transcription/generateTranscription.d.ts CHANGED
@@ -21,10 +21,10 @@ import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionM
  * @returns {Promise<string>} A promise that resolves to the transcribed text.
  */
 export declare function generateTranscription<DATA>(model: TranscriptionModel<DATA, TranscriptionModelSettings>, data: DATA, options?: FunctionOptions & {
-    returnType?: "text";
+    fullResponse?: false;
 }): Promise<string>;
 export declare function generateTranscription<DATA>(model: TranscriptionModel<DATA, TranscriptionModelSettings>, data: DATA, options: FunctionOptions & {
-    returnType: "full";
+    fullResponse: true;
 }): Promise<{
     value: string;
     response: unknown;
package/model-function/generate-transcription/generateTranscription.js CHANGED
@@ -13,5 +13,5 @@ export async function generateTranscription(model, data, options) {
             };
         },
     });
-    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+    return options?.fullResponse ? fullResponse : fullResponse.value;
 }
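A minimal sketch of the `generateTranscription` change, which only swaps the option flag: unlike the functions above, the full response here still exposes the transcript as `value`. Model and audio data are placeholders:

```ts
import { generateTranscription } from "modelfusion";

// Placeholders typed from the generateTranscription signature; the audio data
// shape depends on the transcription model being used.
declare const transcriptionModel: Parameters<typeof generateTranscription>[0];
declare const audioData: Parameters<typeof generateTranscription>[1];

// Default call: resolves to the transcribed string, unchanged.
const transcription = await generateTranscription(transcriptionModel, audioData);

// Full response: only the option name changed here; the transcript field
// is still `value` rather than a renamed field.
const { value, response, metadata } = await generateTranscription(
  transcriptionModel,
  audioData,
  { fullResponse: true }
);
```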