modelfusion 0.96.0 → 0.98.0

This diff shows the changes between publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (63):
  1. package/README.md +11 -4
  2. package/model-function/embed/embed.cjs +14 -2
  3. package/model-function/embed/embed.d.ts +6 -6
  4. package/model-function/embed/embed.js +14 -2
  5. package/model-function/generate-image/generateImage.cjs +10 -9
  6. package/model-function/generate-image/generateImage.d.ts +4 -6
  7. package/model-function/generate-image/generateImage.js +10 -9
  8. package/model-function/generate-speech/generateSpeech.cjs +7 -1
  9. package/model-function/generate-speech/generateSpeech.d.ts +3 -3
  10. package/model-function/generate-speech/generateSpeech.js +7 -1
  11. package/model-function/generate-speech/streamSpeech.cjs +6 -1
  12. package/model-function/generate-speech/streamSpeech.d.ts +3 -3
  13. package/model-function/generate-speech/streamSpeech.js +6 -1
  14. package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +5 -5
  15. package/model-function/generate-structure/StructureFromTextGenerationModel.js +5 -5
  16. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +5 -5
  17. package/model-function/generate-structure/StructureFromTextStreamingModel.js +5 -5
  18. package/model-function/generate-structure/generateStructure.cjs +7 -1
  19. package/model-function/generate-structure/generateStructure.d.ts +3 -3
  20. package/model-function/generate-structure/generateStructure.js +7 -1
  21. package/model-function/generate-structure/streamStructure.cjs +6 -1
  22. package/model-function/generate-structure/streamStructure.d.ts +3 -3
  23. package/model-function/generate-structure/streamStructure.js +6 -1
  24. package/model-function/generate-text/generateText.cjs +7 -1
  25. package/model-function/generate-text/generateText.d.ts +3 -3
  26. package/model-function/generate-text/generateText.js +7 -1
  27. package/model-function/generate-text/streamText.cjs +6 -1
  28. package/model-function/generate-text/streamText.d.ts +3 -3
  29. package/model-function/generate-text/streamText.js +6 -1
  30. package/model-function/generate-transcription/generateTranscription.cjs +1 -1
  31. package/model-function/generate-transcription/generateTranscription.d.ts +2 -2
  32. package/model-function/generate-transcription/generateTranscription.js +1 -1
  33. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
  34. package/model-provider/ollama/OllamaTextGenerationModel.cjs +60 -57
  35. package/model-provider/ollama/OllamaTextGenerationModel.d.ts +33 -22
  36. package/model-provider/ollama/OllamaTextGenerationModel.js +60 -57
  37. package/model-provider/ollama/OllamaTextGenerationModel.test.cjs +2 -2
  38. package/model-provider/ollama/OllamaTextGenerationModel.test.js +2 -2
  39. package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +1 -1
  40. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +49 -0
  41. package/model-provider/openai/chat/AbstractOpenAIChatModel.js +1 -1
  42. package/model-provider/openai/chat/OpenAIChatModel.test.cjs +61 -0
  43. package/model-provider/openai/chat/OpenAIChatModel.test.d.ts +1 -0
  44. package/model-provider/openai/chat/OpenAIChatModel.test.js +59 -0
  45. package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +8 -3
  46. package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +1 -1
  47. package/model-provider/openai/chat/OpenAIChatStreamIterable.js +8 -3
  48. package/package.json +1 -1
  49. package/tool/execute-tool/executeTool.cjs +1 -1
  50. package/tool/execute-tool/executeTool.d.ts +2 -2
  51. package/tool/execute-tool/executeTool.js +1 -1
  52. package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +4 -4
  53. package/tool/generate-tool-call/TextGenerationToolCallModel.js +4 -4
  54. package/tool/generate-tool-call/generateToolCall.cjs +7 -1
  55. package/tool/generate-tool-call/generateToolCall.d.ts +3 -3
  56. package/tool/generate-tool-call/generateToolCall.js +7 -1
  57. package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs +4 -4
  58. package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js +4 -4
  59. package/tool/generate-tool-calls-or-text/generateToolCallsOrText.cjs +1 -1
  60. package/tool/generate-tool-calls-or-text/generateToolCallsOrText.d.ts +2 -2
  61. package/tool/generate-tool-calls-or-text/generateToolCallsOrText.js +1 -1
  62. package/tool/use-tools-or-generate-text/useToolsOrGenerateText.cjs +1 -1
  63. package/tool/use-tools-or-generate-text/useToolsOrGenerateText.js +1 -1
package/model-provider/openai/chat/OpenAIChatModel.test.cjs ADDED
@@ -0,0 +1,61 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const msw_1 = require("msw");
+ const node_1 = require("msw/node");
+ const streamText_js_1 = require("../../../model-function/generate-text/streamText.cjs");
+ const OpenAIChatModel_js_1 = require("./OpenAIChatModel.cjs");
+ const OpenAIApiConfiguration_js_1 = require("../OpenAIApiConfiguration.cjs");
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ let responseChunks = [];
+ const server = (0, node_1.setupServer)(msw_1.http.post("https://api.openai.com/v1/chat/completions", () => {
+     const encoder = new TextEncoder();
+     const stream = new ReadableStream({
+         async start(controller) {
+             try {
+                 for (const chunk of responseChunks) {
+                     controller.enqueue(encoder.encode(chunk));
+                 }
+             }
+             finally {
+                 controller.close();
+             }
+         },
+     });
+     return new msw_1.HttpResponse(stream, {
+         status: 200,
+         headers: {
+             "Content-Type": "text/event-stream",
+             "Cache-Control": "no-cache",
+             Connection: "keep-alive",
+         },
+     });
+ }));
+ beforeAll(() => server.listen());
+ beforeEach(() => {
+     responseChunks = [];
+ });
+ afterEach(() => server.resetHandlers());
+ afterAll(() => server.close());
+ describe("streamText", () => {
+     it("should return only values from the first choice when using streamText", async () => {
+         responseChunks = [
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"A"},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"B"},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
+             "data: [DONE]\n\n",
+         ];
+         const stream = await (0, streamText_js_1.streamText)(new OpenAIChatModel_js_1.OpenAIChatModel({
+             api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test" }),
+             model: "gpt-3.5-turbo",
+             n: 2,
+         }).withTextPrompt(), "test prompt");
+         const chunks = [];
+         for await (const part of stream) {
+             chunks.push(part);
+         }
+         expect(chunks).toStrictEqual(["A"]);
+     });
+ });
package/model-provider/openai/chat/OpenAIChatModel.test.js ADDED
@@ -0,0 +1,59 @@
+ import { HttpResponse, http } from "msw";
+ import { setupServer } from "msw/node";
+ import { streamText } from "../../../model-function/generate-text/streamText.js";
+ import { OpenAIChatModel } from "./OpenAIChatModel.js";
+ import { OpenAIApiConfiguration } from "../OpenAIApiConfiguration.js";
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ let responseChunks = [];
+ const server = setupServer(http.post("https://api.openai.com/v1/chat/completions", () => {
+     const encoder = new TextEncoder();
+     const stream = new ReadableStream({
+         async start(controller) {
+             try {
+                 for (const chunk of responseChunks) {
+                     controller.enqueue(encoder.encode(chunk));
+                 }
+             }
+             finally {
+                 controller.close();
+             }
+         },
+     });
+     return new HttpResponse(stream, {
+         status: 200,
+         headers: {
+             "Content-Type": "text/event-stream",
+             "Cache-Control": "no-cache",
+             Connection: "keep-alive",
+         },
+     });
+ }));
+ beforeAll(() => server.listen());
+ beforeEach(() => {
+     responseChunks = [];
+ });
+ afterEach(() => server.resetHandlers());
+ afterAll(() => server.close());
+ describe("streamText", () => {
+     it("should return only values from the first choice when using streamText", async () => {
+         responseChunks = [
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"A"},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"B"},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
+             "data: [DONE]\n\n",
+         ];
+         const stream = await streamText(new OpenAIChatModel({
+             api: new OpenAIApiConfiguration({ apiKey: "test" }),
+             model: "gpt-3.5-turbo",
+             n: 2,
+         }).withTextPrompt(), "test prompt");
+         const chunks = [];
+         for await (const part of stream) {
+             chunks.push(part);
+         }
+         expect(chunks).toStrictEqual(["A"]);
+     });
+ });
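Both test builds exercise the same scenario: an msw handler replays interleaved SSE chunks for two choices (`n: 2`), and the test asserts that `streamText` surfaces only choice index 0. A minimal consumer-side sketch of that contract, assuming the library's top-level exports (the API-key handling here is illustrative, not part of the diff):

```ts
import { OpenAIApiConfiguration, OpenAIChatModel, streamText } from "modelfusion";

// With n: 2 the API interleaves chunks for two choices, but the text
// stream produced by streamText contains only choice index 0 ("A"
// rather than "AB" in the test fixture above).
const stream = await streamText(
  new OpenAIChatModel({
    api: new OpenAIApiConfiguration({ apiKey: process.env.OPENAI_API_KEY ?? "" }),
    model: "gpt-3.5-turbo",
    n: 2,
  }).withTextPrompt(),
  "test prompt"
);

for await (const textPart of stream) {
  process.stdout.write(textPart); // only the first choice's deltas
}
```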
package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs CHANGED
@@ -87,18 +87,23 @@ async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaValue) {
              continue;
          }
          const completionChunk = eventData;
+         // reset delta for all existing streamDeltas
+         for (const delta of streamDelta) {
+             delta.delta = undefined;
+         }
          for (let i = 0; i < completionChunk.choices.length; i++) {
              const eventChoice = completionChunk.choices[i];
+             const index = eventChoice.index;
              const delta = eventChoice.delta;
-             if (streamDelta[i] == null) {
-                 streamDelta[i] = {
+             if (streamDelta[index] == null) {
+                 streamDelta[index] = {
                      role: undefined,
                      content: "",
                      isComplete: false,
                      delta,
                  };
              }
-             const choice = streamDelta[i];
+             const choice = streamDelta[index];
              choice.delta = delta;
              if (eventChoice.finish_reason != null) {
                  choice.isComplete = true;
package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts CHANGED
@@ -14,6 +14,6 @@ export type OpenAIChatDelta = Array<{
          name?: string;
          arguments?: string;
      };
- };
+ } | undefined;
  }>;
  export declare function createOpenAIChatDeltaIterableQueue<VALUE>(stream: ReadableStream<Uint8Array>, extractDeltaValue: (delta: OpenAIChatDelta) => VALUE): Promise<AsyncIterable<Delta<VALUE>>>;
package/model-provider/openai/chat/OpenAIChatStreamIterable.js CHANGED
@@ -84,18 +84,23 @@ export async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaValue) {
              continue;
          }
          const completionChunk = eventData;
+         // reset delta for all existing streamDeltas
+         for (const delta of streamDelta) {
+             delta.delta = undefined;
+         }
          for (let i = 0; i < completionChunk.choices.length; i++) {
              const eventChoice = completionChunk.choices[i];
+             const index = eventChoice.index;
              const delta = eventChoice.delta;
-             if (streamDelta[i] == null) {
-                 streamDelta[i] = {
+             if (streamDelta[index] == null) {
+                 streamDelta[index] = {
                      role: undefined,
                      content: "",
                      isComplete: false,
                      delta,
                  };
              }
-             const choice = streamDelta[i];
+             const choice = streamDelta[index];
              choice.delta = delta;
              if (eventChoice.finish_reason != null) {
                  choice.isComplete = true;
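This is the fix the new tests pin down: stream deltas are now keyed by the `index` field that each choice reports, rather than by its position in the chunk's `choices` array, and stale deltas are cleared before each chunk is applied, because with `n > 1` a single chunk may carry only a subset of the choices. A condensed sketch of the corrected accumulation logic (types and function name are illustrative, not the library's internals):

```ts
type ChoiceDelta = {
  role?: string;
  content: string;
  isComplete: boolean;
  delta?: object;
};

type ChatChunk = {
  choices: Array<{
    index: number;
    delta: { role?: string; content?: string };
    finish_reason: string | null;
  }>;
};

function applyChunk(streamDelta: ChoiceDelta[], chunk: ChatChunk): void {
  // Reset deltas from the previous chunk so choices that are absent
  // from this chunk do not re-emit stale values downstream.
  for (const entry of streamDelta) {
    if (entry != null) entry.delta = undefined;
  }
  for (const choice of chunk.choices) {
    // Key by the reported index, not the loop counter: with n > 1 a
    // chunk's choices array may contain only one of the n choices.
    const entry = (streamDelta[choice.index] ??= {
      role: undefined,
      content: "",
      isComplete: false,
    });
    entry.delta = choice.delta;
    if (choice.finish_reason != null) {
      entry.isComplete = true;
    }
  }
}
```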
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
      "name": "modelfusion",
      "description": "The TypeScript library for building multi-modal AI applications.",
-     "version": "0.96.0",
+     "version": "0.98.0",
      "author": "Lars Grammel",
      "license": "MIT",
      "keywords": [
package/tool/execute-tool/executeTool.cjs CHANGED
@@ -13,7 +13,7 @@ const ToolExecutionError_js_1 = require("../ToolExecutionError.cjs");
  async function executeTool(// eslint-disable-line @typescript-eslint/no-explicit-any
  tool, args, options) {
      const fullResponse = await doExecuteTool(tool, args, options);
-     return options?.returnType === "full" ? fullResponse : fullResponse.output;
+     return options?.fullResponse ? fullResponse : fullResponse.output;
  }
  exports.executeTool = executeTool;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
package/tool/execute-tool/executeTool.d.ts CHANGED
@@ -15,11 +15,11 @@ export type ExecuteToolMetadata = {
   */
  export declare function executeTool<TOOL extends Tool<any, any, any>>(// eslint-disable-line @typescript-eslint/no-explicit-any
  tool: TOOL, args: TOOL["parameters"]["_type"], options?: FunctionOptions & {
-     returnType?: "output";
+     fullResponse?: false;
  }): Promise<ReturnType<TOOL["execute"]>>;
  export declare function executeTool<TOOL extends Tool<any, any, any>>(// eslint-disable-line @typescript-eslint/no-explicit-any
  tool: TOOL, args: TOOL["parameters"]["_type"], options: FunctionOptions & {
-     returnType: "full";
+     fullResponse: true;
  }): Promise<{
      output: Awaited<ReturnType<TOOL["execute"]>>;
      metadata: ExecuteToolMetadata;
package/tool/execute-tool/executeTool.js CHANGED
@@ -10,7 +10,7 @@ import { ToolExecutionError } from "../ToolExecutionError.js";
  export async function executeTool(// eslint-disable-line @typescript-eslint/no-explicit-any
  tool, args, options) {
      const fullResponse = await doExecuteTool(tool, args, options);
-     return options?.returnType === "full" ? fullResponse : fullResponse.output;
+     return options?.fullResponse ? fullResponse : fullResponse.output;
  }
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  async function doExecuteTool(tool, args, options) {
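The pattern in this and the surrounding tool files is an API migration: the string-valued `returnType` option ("output"/"full"/"structure") is replaced by a boolean `fullResponse` flag. A hedged before/after sketch for `executeTool` (the `calculator` tool is a placeholder standing in for any `Tool` instance):

```ts
import { executeTool, type Tool } from "modelfusion";

// Placeholder: any previously defined Tool instance.
declare const calculator: Tool<any, any, any>;

// 0.96.0: await executeTool(calculator, args, { returnType: "full" });

// 0.98.0: the default call still returns only the tool's output...
const output = await executeTool(calculator, { a: 2, b: 3 });

// ...while fullResponse: true also yields execution metadata.
const { output: sum, metadata } = await executeTool(
  calculator,
  { a: 2, b: 3 },
  { fullResponse: true }
);
```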
package/tool/generate-tool-call/TextGenerationToolCallModel.cjs CHANGED
@@ -30,21 +30,21 @@ class TextGenerationToolCallModel {
          return this.model.settingsForEvent;
      }
      async doGenerateToolCall(tool, prompt, options) {
-         const { response, value, metadata } = await (0, generateText_js_1.generateText)(this.model, this.format.createPrompt(prompt, tool), {
+         const { response, text, metadata } = await (0, generateText_js_1.generateText)(this.model, this.format.createPrompt(prompt, tool), {
              ...options,
-             returnType: "full",
+             fullResponse: true,
          });
          try {
              return {
                  response,
-                 toolCall: this.format.extractToolCall(value),
+                 toolCall: this.format.extractToolCall(text),
                  usage: metadata?.usage,
              };
          }
          catch (error) {
              throw new ToolCallParseError_js_1.ToolCallParseError({
                  toolName: tool.name,
-                 valueText: value,
+                 valueText: text,
                  cause: error,
              });
          }
package/tool/generate-tool-call/TextGenerationToolCallModel.js CHANGED
@@ -27,21 +27,21 @@ export class TextGenerationToolCallModel {
          return this.model.settingsForEvent;
      }
      async doGenerateToolCall(tool, prompt, options) {
-         const { response, value, metadata } = await generateText(this.model, this.format.createPrompt(prompt, tool), {
+         const { response, text, metadata } = await generateText(this.model, this.format.createPrompt(prompt, tool), {
              ...options,
-             returnType: "full",
+             fullResponse: true,
          });
          try {
              return {
                  response,
-                 toolCall: this.format.extractToolCall(value),
+                 toolCall: this.format.extractToolCall(text),
                  usage: metadata?.usage,
              };
          }
          catch (error) {
              throw new ToolCallParseError({
                  toolName: tool.name,
-                 valueText: value,
+                 valueText: text,
                  cause: error,
              });
          }
package/tool/generate-tool-call/generateToolCall.cjs CHANGED
@@ -54,6 +54,12 @@ async function generateToolCall(model, tool, prompt, options) {
          }
      },
  });
-     return options?.returnType === "full" ? fullResponse : fullResponse.value;
+     return options?.fullResponse
+         ? {
+             toolCall: fullResponse.value,
+             response: fullResponse.response,
+             metadata: fullResponse.metadata,
+         }
+         : fullResponse.value;
  }
  exports.generateToolCall = generateToolCall;
package/tool/generate-tool-call/generateToolCall.d.ts CHANGED
@@ -4,12 +4,12 @@ import { ToolCall } from "../ToolCall.js";
  import { ToolDefinition } from "../ToolDefinition.js";
  import { ToolCallGenerationModel, ToolCallGenerationModelSettings } from "./ToolCallGenerationModel.js";
  export declare function generateToolCall<PARAMETERS, PROMPT, NAME extends string, SETTINGS extends ToolCallGenerationModelSettings>(model: ToolCallGenerationModel<PROMPT, SETTINGS>, tool: ToolDefinition<NAME, PARAMETERS>, prompt: PROMPT | ((tool: ToolDefinition<NAME, PARAMETERS>) => PROMPT), options?: FunctionOptions & {
-     returnType?: "structure";
+     fullResponse?: false;
  }): Promise<ToolCall<NAME, PARAMETERS>>;
  export declare function generateToolCall<PARAMETERS, PROMPT, NAME extends string, SETTINGS extends ToolCallGenerationModelSettings>(model: ToolCallGenerationModel<PROMPT, SETTINGS>, tool: ToolDefinition<NAME, PARAMETERS>, prompt: PROMPT | ((tool: ToolDefinition<NAME, PARAMETERS>) => PROMPT), options: FunctionOptions & {
-     returnType: "full";
+     fullResponse: true;
  }): Promise<{
-     value: ToolCall<NAME, PARAMETERS>;
+     toolCall: ToolCall<NAME, PARAMETERS>;
      response: unknown;
      metadata: ModelCallMetadata;
  }>;
package/tool/generate-tool-call/generateToolCall.js CHANGED
@@ -51,5 +51,11 @@ export async function generateToolCall(model, tool, prompt, options) {
          }
      },
  });
-     return options?.returnType === "full" ? fullResponse : fullResponse.value;
+     return options?.fullResponse
+         ? {
+             toolCall: fullResponse.value,
+             response: fullResponse.response,
+             metadata: fullResponse.metadata,
+         }
+         : fullResponse.value;
  }
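`generateToolCall` gets the same boolean flag, and its full response now exposes the result as `toolCall` instead of the generic `value`, matching the `.d.ts` hunk above. A usage sketch with placeholder model, tool, and prompt:

```ts
import { generateToolCall } from "modelfusion";

// Placeholders for a tool-call-capable model, a tool definition, and a prompt.
declare const model: any;
declare const myTool: any;
declare const prompt: any;

// Default: resolves to the ToolCall<NAME, PARAMETERS> itself.
const call = await generateToolCall(model, myTool, prompt);

// 0.98.0 full response: { toolCall, response, metadata } —
// previously { value, response, metadata } behind returnType: "full".
const { toolCall, response, metadata } = await generateToolCall(model, myTool, prompt, {
  fullResponse: true,
});
```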
package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs CHANGED
@@ -30,12 +30,12 @@ class TextGenerationToolCallsOrGenerateTextModel {
          return this.model.settingsForEvent;
      }
      async doGenerateToolCallsOrText(tools, prompt, options) {
-         const { response, value, metadata } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, tools), {
+         const { response, text: generatedText, metadata, } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, tools), {
              ...options,
-             returnType: "full",
+             fullResponse: true,
          });
          try {
-             const { text, toolCalls } = this.template.extractToolCallsAndText(value);
+             const { text, toolCalls } = this.template.extractToolCallsAndText(generatedText);
              return {
                  response,
                  text,
@@ -45,7 +45,7 @@ class TextGenerationToolCallsOrGenerateTextModel {
          }
          catch (error) {
              throw new ToolCallsOrTextParseError_js_1.ToolCallsOrTextParseError({
-                 valueText: value,
+                 valueText: generatedText,
                  cause: error,
              });
          }
package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js CHANGED
@@ -27,12 +27,12 @@ export class TextGenerationToolCallsOrGenerateTextModel {
          return this.model.settingsForEvent;
      }
      async doGenerateToolCallsOrText(tools, prompt, options) {
-         const { response, value, metadata } = await generateText(this.model, this.template.createPrompt(prompt, tools), {
+         const { response, text: generatedText, metadata, } = await generateText(this.model, this.template.createPrompt(prompt, tools), {
              ...options,
-             returnType: "full",
+             fullResponse: true,
          });
          try {
-             const { text, toolCalls } = this.template.extractToolCallsAndText(value);
+             const { text, toolCalls } = this.template.extractToolCallsAndText(generatedText);
              return {
                  response,
                  text,
@@ -42,7 +42,7 @@ export class TextGenerationToolCallsOrGenerateTextModel {
          }
          catch (error) {
              throw new ToolCallsOrTextParseError({
-                 valueText: value,
+                 valueText: generatedText,
                  cause: error,
              });
          }
package/tool/generate-tool-calls-or-text/generateToolCallsOrText.cjs CHANGED
@@ -58,6 +58,6 @@ async function generateToolCallsOrText(model, tools, prompt, options) {
          };
      },
  });
-     return options?.returnType === "full" ? fullResponse : fullResponse.value;
+     return options?.fullResponse ? fullResponse : fullResponse.value;
  }
  exports.generateToolCallsOrText = generateToolCallsOrText;
package/tool/generate-tool-calls-or-text/generateToolCallsOrText.d.ts CHANGED
@@ -15,13 +15,13 @@ type ToToolCallUnion<T> = {
  }[keyof T];
  type ToOutputValue<TOOL_CALLS extends ToolCallDefinitionArray<ToolDefinition<any, any>[]>> = ToToolCallUnion<ToToolCallDefinitionMap<TOOL_CALLS>>;
  export declare function generateToolCallsOrText<TOOLS extends Array<ToolDefinition<any, any>>, PROMPT>(model: ToolCallsOrTextGenerationModel<PROMPT, ToolCallsOrTextGenerationModelSettings>, tools: TOOLS, prompt: PROMPT | ((tools: TOOLS) => PROMPT), options?: FunctionOptions & {
-     returnType?: "structure";
+     fullResponse?: false;
  }): Promise<{
      text: string | null;
      toolCalls: Array<ToOutputValue<TOOLS>> | null;
  }>;
  export declare function generateToolCallsOrText<TOOLS extends ToolDefinition<any, any>[], PROMPT>(model: ToolCallsOrTextGenerationModel<PROMPT, ToolCallsOrTextGenerationModelSettings>, tools: TOOLS, prompt: PROMPT | ((tools: TOOLS) => PROMPT), options: FunctionOptions & {
-     returnType?: "full";
+     fullResponse?: boolean;
  }): Promise<{
      value: {
          text: string | null;
package/tool/generate-tool-calls-or-text/generateToolCallsOrText.js CHANGED
@@ -55,5 +55,5 @@ export async function generateToolCallsOrText(model, tools, prompt, options) {
          };
      },
  });
-     return options?.returnType === "full" ? fullResponse : fullResponse.value;
+     return options?.fullResponse ? fullResponse : fullResponse.value;
  }
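`generateToolCallsOrText` adopts the flag as well, but note from the `.d.ts` hunk above that its full response keeps the parsed result nested under `value` rather than renaming it. A sketch with placeholders:

```ts
import { generateToolCallsOrText } from "modelfusion";

declare const model: any;   // placeholder tool-calls-or-text model
declare const tools: any[]; // placeholder tool definitions
declare const prompt: any;  // placeholder prompt

// Default: just { text, toolCalls }.
const { text, toolCalls } = await generateToolCallsOrText(model, tools, prompt);

// Full response: the parsed result stays under `value`.
const full = await generateToolCallsOrText(model, tools, prompt, { fullResponse: true });
console.log(full.value.text, full.value.toolCalls);
```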
package/tool/use-tools-or-generate-text/useToolsOrGenerateText.cjs CHANGED
@@ -15,7 +15,7 @@ async function useToolsOrGenerateText(model, tools, prompt, options) {
      input: expandedPrompt,
      functionType: "use-tools-or-generate-text",
      execute: async (options) => {
-         const modelResponse = await (0, generateToolCallsOrText_js_1.generateToolCallsOrText)(model, tools, expandedPrompt, { ...options, returnType: "structure" });
+         const modelResponse = await (0, generateToolCallsOrText_js_1.generateToolCallsOrText)(model, tools, expandedPrompt, { ...options, fullResponse: false });
          const { toolCalls, text } = modelResponse;
          // no tool calls:
          if (toolCalls == null) {
package/tool/use-tools-or-generate-text/useToolsOrGenerateText.js CHANGED
@@ -12,7 +12,7 @@ export async function useToolsOrGenerateText(model, tools, prompt, options) {
      input: expandedPrompt,
      functionType: "use-tools-or-generate-text",
      execute: async (options) => {
-         const modelResponse = await generateToolCallsOrText(model, tools, expandedPrompt, { ...options, returnType: "structure" });
+         const modelResponse = await generateToolCallsOrText(model, tools, expandedPrompt, { ...options, fullResponse: false });
          const { toolCalls, text } = modelResponse;
          // no tool calls:
          if (toolCalls == null) {