modelfusion 0.121.1 → 0.122.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. package/CHANGELOG.md +47 -1
  2. package/README.md +87 -85
  3. package/classifier/SemanticClassifier.cjs +8 -2
  4. package/classifier/SemanticClassifier.js +8 -2
  5. package/model-function/ModelCallEvent.d.ts +3 -0
  6. package/model-function/embed/embed.cjs +14 -14
  7. package/model-function/embed/embed.d.ts +24 -18
  8. package/model-function/embed/embed.js +14 -14
  9. package/model-function/generate-image/generateImage.cjs +6 -6
  10. package/model-function/generate-image/generateImage.d.ts +12 -9
  11. package/model-function/generate-image/generateImage.js +6 -6
  12. package/model-function/generate-speech/generateSpeech.cjs +7 -7
  13. package/model-function/generate-speech/generateSpeech.d.ts +12 -9
  14. package/model-function/generate-speech/generateSpeech.js +7 -7
  15. package/model-function/generate-speech/streamSpeech.cjs +6 -6
  16. package/model-function/generate-speech/streamSpeech.d.ts +12 -8
  17. package/model-function/generate-speech/streamSpeech.js +6 -6
  18. package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +5 -3
  19. package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +1 -1
  20. package/model-function/generate-structure/StructureFromTextGenerationModel.js +5 -3
  21. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +5 -1
  22. package/model-function/generate-structure/StructureFromTextStreamingModel.js +5 -1
  23. package/model-function/generate-structure/StructureGenerationModel.d.ts +1 -1
  24. package/model-function/generate-structure/generateStructure.cjs +8 -8
  25. package/model-function/generate-structure/generateStructure.d.ts +17 -10
  26. package/model-function/generate-structure/generateStructure.js +8 -8
  27. package/model-function/generate-structure/streamStructure.cjs +6 -6
  28. package/model-function/generate-structure/streamStructure.d.ts +16 -10
  29. package/model-function/generate-structure/streamStructure.js +6 -6
  30. package/model-function/generate-text/generateText.cjs +6 -6
  31. package/model-function/generate-text/generateText.d.ts +12 -9
  32. package/model-function/generate-text/generateText.js +6 -6
  33. package/model-function/generate-text/streamText.cjs +6 -6
  34. package/model-function/generate-text/streamText.d.ts +12 -8
  35. package/model-function/generate-text/streamText.js +6 -6
  36. package/model-function/generate-transcription/generateTranscription.cjs +3 -3
  37. package/model-function/generate-transcription/generateTranscription.d.ts +12 -9
  38. package/model-function/generate-transcription/generateTranscription.js +3 -3
  39. package/model-provider/cohere/CohereTextGenerationModel.d.ts +12 -12
  40. package/model-provider/cohere/CohereTextGenerationModel.test.cjs +7 -4
  41. package/model-provider/cohere/CohereTextGenerationModel.test.js +7 -4
  42. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +10 -10
  43. package/model-provider/llamacpp/LlamaCppCompletionModel.test.cjs +4 -1
  44. package/model-provider/llamacpp/LlamaCppCompletionModel.test.js +4 -1
  45. package/model-provider/mistral/MistralChatModel.test.cjs +15 -8
  46. package/model-provider/mistral/MistralChatModel.test.js +15 -8
  47. package/model-provider/ollama/OllamaChatModel.test.cjs +6 -1
  48. package/model-provider/ollama/OllamaChatModel.test.js +6 -1
  49. package/model-provider/ollama/OllamaCompletionModel.cjs +1 -1
  50. package/model-provider/ollama/OllamaCompletionModel.d.ts +7 -7
  51. package/model-provider/ollama/OllamaCompletionModel.js +1 -1
  52. package/model-provider/ollama/OllamaCompletionModel.test.cjs +31 -16
  53. package/model-provider/ollama/OllamaCompletionModel.test.js +31 -16
  54. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs +4 -4
  55. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
  56. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js +4 -4
  57. package/model-provider/openai/OpenAIChatModel.test.cjs +21 -14
  58. package/model-provider/openai/OpenAIChatModel.test.js +21 -14
  59. package/model-provider/openai/OpenAICompletionModel.test.cjs +15 -9
  60. package/model-provider/openai/OpenAICompletionModel.test.js +15 -9
  61. package/package.json +1 -1
  62. package/tool/execute-tool/executeTool.cjs +5 -5
  63. package/tool/execute-tool/executeTool.d.ts +8 -4
  64. package/tool/execute-tool/executeTool.js +5 -5
  65. package/tool/execute-tool/safeExecuteToolCall.cjs +1 -1
  66. package/tool/execute-tool/safeExecuteToolCall.js +1 -1
  67. package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +4 -2
  68. package/tool/generate-tool-call/TextGenerationToolCallModel.js +4 -2
  69. package/tool/generate-tool-call/generateToolCall.cjs +7 -7
  70. package/tool/generate-tool-call/generateToolCall.d.ts +11 -5
  71. package/tool/generate-tool-call/generateToolCall.js +7 -7
  72. package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +4 -2
  73. package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +4 -2
  74. package/tool/generate-tool-calls/generateToolCalls.cjs +3 -3
  75. package/tool/generate-tool-calls/generateToolCalls.d.ts +11 -5
  76. package/tool/generate-tool-calls/generateToolCalls.js +3 -3
  77. package/tool/use-tool/useTool.cjs +2 -2
  78. package/tool/use-tool/useTool.d.ts +5 -1
  79. package/tool/use-tool/useTool.js +2 -2
  80. package/tool/use-tools/useTools.cjs +8 -2
  81. package/tool/use-tools/useTools.d.ts +5 -1
  82. package/tool/use-tools/useTools.js +8 -2
  83. package/vector-index/VectorIndexRetriever.cjs +5 -1
  84. package/vector-index/VectorIndexRetriever.js +5 -1
  85. package/vector-index/upsertIntoVectorIndex.cjs +5 -1
  86. package/vector-index/upsertIntoVectorIndex.js +5 -1
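
The headline change in 0.122.0, visible throughout the hunks below: the model functions (`generateText`, `streamText`, `generateStructure`, `streamStructure`, and friends) and the tool functions now take a single parameters object instead of positional arguments, and `fullResponse` moves into that object. A minimal migration sketch, assuming root-level exports from `modelfusion`:

```ts
import { generateText, OllamaCompletionModel } from "modelfusion";

const model = new OllamaCompletionModel({ model: "test-model" }).withTextPrompt();

// 0.121.x (positional): await generateText(model, "test prompt");
// 0.122.0 (single parameters object):
const text = await generateText({ model, prompt: "test prompt" });

// `fullResponse` rides in the same object; the raw provider payload is
// exposed as `rawResponse` (renamed from `response`, see the diffs below):
const { text: fullText, rawResponse, metadata } = await generateText({
  model,
  prompt: "test prompt",
  fullResponse: true,
});
```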
package/model-provider/ollama/OllamaChatModel.test.js CHANGED
@@ -14,7 +14,12 @@ describe("streamText", () => {
  `"done":true,"total_duration":4843619375,"load_duration":1101458,"prompt_eval_count":5,"prompt_eval_duration":199339000,` +
  `"eval_count":317,"eval_duration":4639772000}\n`,
  ];
- const stream = await streamText(new OllamaChatModel({ model: "mistral:text" }).withTextPrompt(), "hello");
+ const stream = await streamText({
+ model: new OllamaChatModel({
+ model: "mistral:text",
+ }).withTextPrompt(),
+ prompt: "hello",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await arrayFromAsync(stream)).toStrictEqual([
  "Hello",

package/model-provider/ollama/OllamaCompletionModel.cjs CHANGED
@@ -214,7 +214,7 @@ const ollamaCompletionResponseSchema = zod_1.z.object({
  response: zod_1.z.string(),
  total_duration: zod_1.z.number(),
  load_duration: zod_1.z.number().optional(),
- prompt_eval_count: zod_1.z.number(),
+ prompt_eval_count: zod_1.z.number().optional(),
  prompt_eval_duration: zod_1.z.number().optional(),
  eval_count: zod_1.z.number(),
  eval_duration: zod_1.z.number(),

package/model-provider/ollama/OllamaCompletionModel.d.ts CHANGED
@@ -67,10 +67,10 @@ export declare class OllamaCompletionModel<CONTEXT_WINDOW_SIZE extends number |
  response: string;
  created_at: string;
  total_duration: number;
- prompt_eval_count: number;
  eval_count: number;
  eval_duration: number;
  load_duration?: number | undefined;
+ prompt_eval_count?: number | undefined;
  prompt_eval_duration?: number | undefined;
  context?: number[] | undefined;
  };
@@ -86,10 +86,10 @@ export declare class OllamaCompletionModel<CONTEXT_WINDOW_SIZE extends number |
  response: string;
  created_at: string;
  total_duration: number;
- prompt_eval_count: number;
  eval_count: number;
  eval_duration: number;
  load_duration?: number | undefined;
+ prompt_eval_count?: number | undefined;
  prompt_eval_duration?: number | undefined;
  context?: number[] | undefined;
  };
@@ -105,10 +105,10 @@ export declare class OllamaCompletionModel<CONTEXT_WINDOW_SIZE extends number |
  response: string;
  created_at: string;
  total_duration: number;
- prompt_eval_count: number;
  eval_count: number;
  eval_duration: number;
  load_duration?: number | undefined;
+ prompt_eval_count?: number | undefined;
  prompt_eval_duration?: number | undefined;
  context?: number[] | undefined;
  };
@@ -155,7 +155,7 @@ declare const ollamaCompletionResponseSchema: z.ZodObject<{
  response: z.ZodString;
  total_duration: z.ZodNumber;
  load_duration: z.ZodOptional<z.ZodNumber>;
- prompt_eval_count: z.ZodNumber;
+ prompt_eval_count: z.ZodOptional<z.ZodNumber>;
  prompt_eval_duration: z.ZodOptional<z.ZodNumber>;
  eval_count: z.ZodNumber;
  eval_duration: z.ZodNumber;
@@ -166,10 +166,10 @@ declare const ollamaCompletionResponseSchema: z.ZodObject<{
  response: string;
  created_at: string;
  total_duration: number;
- prompt_eval_count: number;
  eval_count: number;
  eval_duration: number;
  load_duration?: number | undefined;
+ prompt_eval_count?: number | undefined;
  prompt_eval_duration?: number | undefined;
  context?: number[] | undefined;
  }, {
@@ -178,10 +178,10 @@ declare const ollamaCompletionResponseSchema: z.ZodObject<{
  response: string;
  created_at: string;
  total_duration: number;
- prompt_eval_count: number;
  eval_count: number;
  eval_duration: number;
  load_duration?: number | undefined;
+ prompt_eval_count?: number | undefined;
  prompt_eval_duration?: number | undefined;
  context?: number[] | undefined;
  }>;
@@ -262,10 +262,10 @@ export declare const OllamaCompletionResponseFormat: {
  response: string;
  created_at: string;
  total_duration: number;
- prompt_eval_count: number;
  eval_count: number;
  eval_duration: number;
  load_duration?: number | undefined;
+ prompt_eval_count?: number | undefined;
  prompt_eval_duration?: number | undefined;
  context?: number[] | undefined;
  }>;

package/model-provider/ollama/OllamaCompletionModel.js CHANGED
@@ -210,7 +210,7 @@ const ollamaCompletionResponseSchema = z.object({
  response: z.string(),
  total_duration: z.number(),
  load_duration: z.number().optional(),
- prompt_eval_count: z.number(),
+ prompt_eval_count: z.number().optional(),
  prompt_eval_duration: z.number().optional(),
  eval_count: z.number(),
  eval_duration: z.number(),
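
Alongside the call-style migration, both builds and the declarations above relax the Ollama completion response schema: `prompt_eval_count` is now optional, since Ollama omits the field in some responses (for example when prompt evaluation is served from cache). A sketch of a guarded consumer, with an abridged response type taken from the declarations above:

```ts
// Abridged shape of the Ollama completion response per the .d.ts above.
interface OllamaCompletionResponse {
  response: string;
  total_duration: number;
  eval_count: number;
  eval_duration: number;
  load_duration?: number;
  prompt_eval_count?: number; // optional as of 0.122.0
  prompt_eval_duration?: number;
  context?: number[];
}

// Guard the optional field instead of assuming it is present.
function totalTokens(res: OllamaCompletionResponse): number {
  return (res.prompt_eval_count ?? 0) + res.eval_count;
}
```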

package/model-provider/ollama/OllamaCompletionModel.test.cjs CHANGED
@@ -34,9 +34,12 @@ describe("generateText", () => {
  eval_count: 113,
  eval_duration: 1325948000,
  };
- const result = await (0, generateText_js_1.generateText)(new OllamaCompletionModel_js_1.OllamaCompletionModel({
- model: "test-model",
- }).withTextPrompt(), "test prompt");
+ const result = await (0, generateText_js_1.generateText)({
+ model: new OllamaCompletionModel_js_1.OllamaCompletionModel({
+ model: "test-model",
+ }).withTextPrompt(),
+ prompt: "test prompt",
+ });
  expect(result).toEqual("test response");
  });
  it("should throw retryable ApiCallError when Ollama is overloaded", async () => {
@@ -47,12 +50,15 @@ describe("generateText", () => {
  done: false,
  };
  try {
- await (0, generateText_js_1.generateText)(new OllamaCompletionModel_js_1.OllamaCompletionModel({
- api: new OllamaApiConfiguration_js_1.OllamaApiConfiguration({
- retry: (0, retryNever_js_1.retryNever)(),
- }),
- model: "test-model",
- }).withTextPrompt(), "test prompt");
+ await (0, generateText_js_1.generateText)({
+ model: new OllamaCompletionModel_js_1.OllamaCompletionModel({
+ api: new OllamaApiConfiguration_js_1.OllamaApiConfiguration({
+ retry: (0, retryNever_js_1.retryNever)(),
+ }),
+ model: "test-model",
+ }).withTextPrompt(),
+ prompt: "test prompt",
+ });
  (0, assert_1.fail)("Should have thrown ApiCallError");
  }
  catch (expectedError) {
@@ -73,7 +79,12 @@ describe("streamText", () => {
  `"done":true,"context":[123,456,789],"total_duration":2165354041,"load_duration":1293958,` +
  `"prompt_eval_count":5,"prompt_eval_duration":193273000,"eval_count":136,"eval_duration":1966852000}\n`,
  ];
- const stream = await (0, streamText_js_1.streamText)(new OllamaCompletionModel_js_1.OllamaCompletionModel({ model: "mistral:text" }).withTextPrompt(), "hello");
+ const stream = await (0, streamText_js_1.streamText)({
+ model: new OllamaCompletionModel_js_1.OllamaCompletionModel({
+ model: "mistral:text",
+ }).withTextPrompt(),
+ prompt: "hello",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
  "Hello",
@@ -103,12 +114,16 @@ describe("streamStructure", () => {
  `"total_duration":521893000,"load_duration":957666,"prompt_eval_count":74,"prompt_eval_duration":302508000,` +
  `"eval_count":12,"eval_duration":215282000}\n`,
  ];
- const stream = await (0, streamStructure_js_1.streamStructure)(new OllamaCompletionModel_js_1.OllamaCompletionModel({
- model: "mistral:text",
- promptTemplate: OllamaCompletionPrompt_js_1.Text,
- format: "json",
- raw: true,
- }).asStructureGenerationModel(jsonStructurePrompt_js_1.jsonStructurePrompt.text()), (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })), "generate a name");
+ const stream = await (0, streamStructure_js_1.streamStructure)({
+ model: new OllamaCompletionModel_js_1.OllamaCompletionModel({
+ model: "mistral:text",
+ promptTemplate: OllamaCompletionPrompt_js_1.Text,
+ format: "json",
+ raw: true,
+ }).asStructureGenerationModel(jsonStructurePrompt_js_1.jsonStructurePrompt.text()),
+ schema: (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })),
+ prompt: "generate a name",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
  { isComplete: false, value: {} },

package/model-provider/ollama/OllamaCompletionModel.test.js CHANGED
@@ -32,9 +32,12 @@ describe("generateText", () => {
  eval_count: 113,
  eval_duration: 1325948000,
  };
- const result = await generateText(new OllamaCompletionModel({
- model: "test-model",
- }).withTextPrompt(), "test prompt");
+ const result = await generateText({
+ model: new OllamaCompletionModel({
+ model: "test-model",
+ }).withTextPrompt(),
+ prompt: "test prompt",
+ });
  expect(result).toEqual("test response");
  });
  it("should throw retryable ApiCallError when Ollama is overloaded", async () => {
@@ -45,12 +48,15 @@ describe("generateText", () => {
  done: false,
  };
  try {
- await generateText(new OllamaCompletionModel({
- api: new OllamaApiConfiguration({
- retry: retryNever(),
- }),
- model: "test-model",
- }).withTextPrompt(), "test prompt");
+ await generateText({
+ model: new OllamaCompletionModel({
+ api: new OllamaApiConfiguration({
+ retry: retryNever(),
+ }),
+ model: "test-model",
+ }).withTextPrompt(),
+ prompt: "test prompt",
+ });
  fail("Should have thrown ApiCallError");
  }
  catch (expectedError) {
@@ -71,7 +77,12 @@ describe("streamText", () => {
  `"done":true,"context":[123,456,789],"total_duration":2165354041,"load_duration":1293958,` +
  `"prompt_eval_count":5,"prompt_eval_duration":193273000,"eval_count":136,"eval_duration":1966852000}\n`,
  ];
- const stream = await streamText(new OllamaCompletionModel({ model: "mistral:text" }).withTextPrompt(), "hello");
+ const stream = await streamText({
+ model: new OllamaCompletionModel({
+ model: "mistral:text",
+ }).withTextPrompt(),
+ prompt: "hello",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await arrayFromAsync(stream)).toStrictEqual([
  "Hello",
@@ -101,12 +112,16 @@ describe("streamStructure", () => {
  `"total_duration":521893000,"load_duration":957666,"prompt_eval_count":74,"prompt_eval_duration":302508000,` +
  `"eval_count":12,"eval_duration":215282000}\n`,
  ];
- const stream = await streamStructure(new OllamaCompletionModel({
- model: "mistral:text",
- promptTemplate: Text,
- format: "json",
- raw: true,
- }).asStructureGenerationModel(jsonStructurePrompt.text()), zodSchema(z.object({ name: z.string() })), "generate a name");
+ const stream = await streamStructure({
+ model: new OllamaCompletionModel({
+ model: "mistral:text",
+ promptTemplate: Text,
+ format: "json",
+ raw: true,
+ }).asStructureGenerationModel(jsonStructurePrompt.text()),
+ schema: zodSchema(z.object({ name: z.string() })),
+ prompt: "generate a name",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await arrayFromAsync(stream)).toStrictEqual([
  { isComplete: false, value: {} },
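
The streamStructure tests above show the same object-style call with a named `schema` field. A condensed usage sketch based on those tests (root-level exports assumed; the tests additionally set `promptTemplate` to the Ollama `Text` template, omitted here):

```ts
import {
  streamStructure,
  zodSchema,
  jsonStructurePrompt,
  OllamaCompletionModel,
} from "modelfusion";
import { z } from "zod";

const structureStream = await streamStructure({
  model: new OllamaCompletionModel({
    model: "mistral:text",
    format: "json",
    raw: true,
  }).asStructureGenerationModel(jsonStructurePrompt.text()),
  schema: zodSchema(z.object({ name: z.string() })),
  prompt: "generate a name",
});

// Each part is { isComplete, value }, matching the test assertions above.
for await (const part of structureStream) {
  console.log(part.isComplete, part.value);
}
```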

package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs CHANGED
@@ -101,7 +101,7 @@ class OpenAIChatFunctionCallStructureGenerationModel {
  async doGenerateStructure(schema, prompt, // first argument of the function
  options) {
  const expandedPrompt = this.promptTemplate.format(prompt);
- const response = await this.model
+ const rawResponse = await this.model
  .withSettings({
  stopSequences: [
  ...(this.settings.stopSequences ?? []),
@@ -119,13 +119,13 @@ class OpenAIChatFunctionCallStructureGenerationModel {
  },
  ],
  });
- const valueText = response.choices[0].message.function_call.arguments;
+ const valueText = rawResponse.choices[0].message.function_call.arguments;
  try {
  return {
- response,
+ rawResponse,
  valueText,
  value: secure_json_parse_1.default.parse(valueText),
- usage: this.model.extractUsage(response),
+ usage: this.model.extractUsage(rawResponse),
  };
  }
  catch (error) {

package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts CHANGED
@@ -127,7 +127,7 @@ OpenAIChatSettings> {
  */
  doGenerateStructure(schema: Schema<unknown> & JsonSchemaProducer, prompt: Parameters<PROMPT_TEMPLATE["format"]>[0], // first argument of the function
  options: FunctionCallOptions): Promise<{
- response: {
+ rawResponse: {
  object: "chat.completion";
  model: string;
  usage: {

package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js CHANGED
@@ -95,7 +95,7 @@ export class OpenAIChatFunctionCallStructureGenerationModel {
  async doGenerateStructure(schema, prompt, // first argument of the function
  options) {
  const expandedPrompt = this.promptTemplate.format(prompt);
- const response = await this.model
+ const rawResponse = await this.model
  .withSettings({
  stopSequences: [
  ...(this.settings.stopSequences ?? []),
@@ -113,13 +113,13 @@ export class OpenAIChatFunctionCallStructureGenerationModel {
  },
  ],
  });
- const valueText = response.choices[0].message.function_call.arguments;
+ const valueText = rawResponse.choices[0].message.function_call.arguments;
  try {
  return {
- response,
+ rawResponse,
  valueText,
  value: SecureJSON.parse(valueText),
- usage: this.model.extractUsage(response),
+ usage: this.model.extractUsage(rawResponse),
  };
  }
  catch (error) {
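
The rename in the hunks above (`response` → `rawResponse` in the `doGenerateStructure` result, in both builds and the declarations) matters for anyone maintaining a custom structure-generation model, which should mirror the new field name. A self-contained sketch of the new return shape, where `callProvider` and `extractUsage` are hypothetical stand-ins for the model internals:

```ts
// Hypothetical stand-ins for the model internals in the sketch below.
type ChatResponse = {
  choices: { message: { function_call: { arguments: string } } }[];
};

async function callProvider(): Promise<ChatResponse> {
  return { choices: [{ message: { function_call: { arguments: "{}" } } }] };
}

function extractUsage(_res: ChatResponse): { totalTokens?: number } {
  return {};
}

// The result now carries `rawResponse` (was `response` in 0.121.x).
async function doGenerateStructure() {
  const rawResponse = await callProvider();
  const valueText = rawResponse.choices[0].message.function_call.arguments;
  return {
    rawResponse,
    valueText,
    value: JSON.parse(valueText) as unknown,
    usage: extractUsage(rawResponse),
  };
}
```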

package/model-provider/openai/OpenAIChatModel.test.cjs CHANGED
@@ -27,11 +27,14 @@ describe("streamText", () => {
  `"system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
  "data: [DONE]\n\n",
  ];
- const stream = await (0, streamText_js_1.streamText)(new OpenAIChatModel_js_1.OpenAIChatModel({
- api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
- model: "gpt-3.5-turbo",
- numberOfGenerations: 2,
- }).withTextPrompt(), "test prompt");
+ const stream = await (0, streamText_js_1.streamText)({
+ model: new OpenAIChatModel_js_1.OpenAIChatModel({
+ api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+ model: "gpt-3.5-turbo",
+ numberOfGenerations: 2,
+ }).withTextPrompt(),
+ prompt: "test prompt",
+ });
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual(["A"]);
  });
  });
@@ -73,15 +76,19 @@ describe("streamStructure", () => {
  `"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}\n\n`,
  `data: [DONE]\n\n`,
  ];
- const stream = await (0, streamStructure_js_1.streamStructure)(new OpenAIChatModel_js_1.OpenAIChatModel({
- api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
- model: "gpt-3.5-turbo",
- })
- .asFunctionCallStructureGenerationModel({
- fnName: "generateCharacter",
- fnDescription: "Generate character descriptions.",
- })
- .withTextPrompt(), (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })), "generate a name");
+ const stream = await (0, streamStructure_js_1.streamStructure)({
+ model: new OpenAIChatModel_js_1.OpenAIChatModel({
+ api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+ model: "gpt-3.5-turbo",
+ })
+ .asFunctionCallStructureGenerationModel({
+ fnName: "generateCharacter",
+ fnDescription: "Generate character descriptions.",
+ })
+ .withTextPrompt(),
+ schema: (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })),
+ prompt: "generate a name",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
  { isComplete: false, value: {} },

package/model-provider/openai/OpenAIChatModel.test.js CHANGED
@@ -25,11 +25,14 @@ describe("streamText", () => {
  `"system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
  "data: [DONE]\n\n",
  ];
- const stream = await streamText(new OpenAIChatModel({
- api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
- model: "gpt-3.5-turbo",
- numberOfGenerations: 2,
- }).withTextPrompt(), "test prompt");
+ const stream = await streamText({
+ model: new OpenAIChatModel({
+ api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+ model: "gpt-3.5-turbo",
+ numberOfGenerations: 2,
+ }).withTextPrompt(),
+ prompt: "test prompt",
+ });
  expect(await arrayFromAsync(stream)).toStrictEqual(["A"]);
  });
  });
@@ -71,15 +74,19 @@ describe("streamStructure", () => {
  `"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}\n\n`,
  `data: [DONE]\n\n`,
  ];
- const stream = await streamStructure(new OpenAIChatModel({
- api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
- model: "gpt-3.5-turbo",
- })
- .asFunctionCallStructureGenerationModel({
- fnName: "generateCharacter",
- fnDescription: "Generate character descriptions.",
- })
- .withTextPrompt(), zodSchema(z.object({ name: z.string() })), "generate a name");
+ const stream = await streamStructure({
+ model: new OpenAIChatModel({
+ api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+ model: "gpt-3.5-turbo",
+ })
+ .asFunctionCallStructureGenerationModel({
+ fnName: "generateCharacter",
+ fnDescription: "Generate character descriptions.",
+ })
+ .withTextPrompt(),
+ schema: zodSchema(z.object({ name: z.string() })),
+ prompt: "generate a name",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await arrayFromAsync(stream)).toStrictEqual([
  { isComplete: false, value: {} },

package/model-provider/openai/OpenAICompletionModel.test.cjs CHANGED
@@ -20,10 +20,13 @@ describe("streamText", () => {
  `"choices":[{"text":"","index":0,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
  "data: [DONE]\n\n",
  ];
- const stream = await (0, streamText_js_1.streamText)(new OpenAICompletionModel_js_1.OpenAICompletionModel({
- api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
- model: "gpt-3.5-turbo-instruct",
- }), "hello");
+ const stream = await (0, streamText_js_1.streamText)({
+ model: new OpenAICompletionModel_js_1.OpenAICompletionModel({
+ api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+ model: "gpt-3.5-turbo-instruct",
+ }),
+ prompt: "hello",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
  "Hello",
@@ -43,11 +46,14 @@ describe("streamText", () => {
  `"choices":[{"text":"","index":1,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
  "data: [DONE]\n\n",
  ];
- const stream = await (0, streamText_js_1.streamText)(new OpenAICompletionModel_js_1.OpenAICompletionModel({
- api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
- model: "gpt-3.5-turbo-instruct",
- numberOfGenerations: 2,
- }), "test prompt");
+ const stream = await (0, streamText_js_1.streamText)({
+ model: new OpenAICompletionModel_js_1.OpenAICompletionModel({
+ api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+ model: "gpt-3.5-turbo-instruct",
+ numberOfGenerations: 2,
+ }),
+ prompt: "test prompt",
+ });
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual(["A"]);
  });
  });

package/model-provider/openai/OpenAICompletionModel.test.js CHANGED
@@ -18,10 +18,13 @@ describe("streamText", () => {
  `"choices":[{"text":"","index":0,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
  "data: [DONE]\n\n",
  ];
- const stream = await streamText(new OpenAICompletionModel({
- api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
- model: "gpt-3.5-turbo-instruct",
- }), "hello");
+ const stream = await streamText({
+ model: new OpenAICompletionModel({
+ api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+ model: "gpt-3.5-turbo-instruct",
+ }),
+ prompt: "hello",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await arrayFromAsync(stream)).toStrictEqual([
  "Hello",
@@ -41,11 +44,14 @@ describe("streamText", () => {
  `"choices":[{"text":"","index":1,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
  "data: [DONE]\n\n",
  ];
- const stream = await streamText(new OpenAICompletionModel({
- api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
- model: "gpt-3.5-turbo-instruct",
- numberOfGenerations: 2,
- }), "test prompt");
+ const stream = await streamText({
+ model: new OpenAICompletionModel({
+ api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+ model: "gpt-3.5-turbo-instruct",
+ numberOfGenerations: 2,
+ }),
+ prompt: "test prompt",
+ });
  expect(await arrayFromAsync(stream)).toStrictEqual(["A"]);
  });
  });
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "modelfusion",
  "description": "The TypeScript library for building AI applications.",
- "version": "0.121.1",
+ "version": "0.122.0",
  "author": "Lars Grammel",
  "license": "MIT",
  "keywords": [

package/tool/execute-tool/executeTool.cjs CHANGED
@@ -10,14 +10,14 @@ const getRun_js_1 = require("../../core/getRun.cjs");
  const DurationMeasurement_js_1 = require("../../util/DurationMeasurement.cjs");
  const runSafe_js_1 = require("../../util/runSafe.cjs");
  const ToolExecutionError_js_1 = require("../ToolExecutionError.cjs");
- async function executeTool(// eslint-disable-line @typescript-eslint/no-explicit-any
- tool, args, options) {
- const fullResponse = await doExecuteTool(tool, args, options);
- return options?.fullResponse ? fullResponse : fullResponse.output;
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ async function executeTool({ tool, args, fullResponse, ...options }) {
+ const callResponse = await doExecuteTool({ tool, args, ...options });
+ return fullResponse ? callResponse : callResponse.output;
  }
  exports.executeTool = executeTool;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
- async function doExecuteTool(tool, args, options) {
+ async function doExecuteTool({ tool, args, ...options }) {
  const run = await (0, getRun_js_1.getRun)(options?.run);
  const eventSource = new FunctionEventSource_js_1.FunctionEventSource({
  observers: [

package/tool/execute-tool/executeTool.d.ts CHANGED
@@ -14,13 +14,17 @@ export type ExecuteToolMetadata = {
  * `executeTool` executes a tool with the given parameters.
  */
  export declare function executeTool<TOOL extends Tool<any, any, any>>(// eslint-disable-line @typescript-eslint/no-explicit-any
- tool: TOOL, args: TOOL["parameters"]["_type"], options?: FunctionOptions & {
+ params: {
+ tool: TOOL;
+ args: TOOL["parameters"]["_type"];
  fullResponse?: false;
- }): Promise<ReturnType<TOOL["execute"]>>;
+ } & FunctionOptions): Promise<ReturnType<TOOL["execute"]>>;
  export declare function executeTool<TOOL extends Tool<any, any, any>>(// eslint-disable-line @typescript-eslint/no-explicit-any
- tool: TOOL, args: TOOL["parameters"]["_type"], options: FunctionOptions & {
+ params: {
+ tool: TOOL;
+ args: TOOL["parameters"]["_type"];
  fullResponse: true;
- }): Promise<{
+ } & FunctionOptions): Promise<{
  output: Awaited<ReturnType<TOOL["execute"]>>;
  metadata: ExecuteToolMetadata;
  }>;
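
`executeTool` follows the same pattern: `tool`, `args`, and `fullResponse` are now fields of one params object, typed by the overloads above. A usage sketch with a hypothetical calculator tool:

```ts
import { executeTool, Tool, zodSchema } from "modelfusion";
import { z } from "zod";

// Hypothetical example tool.
const calculator = new Tool({
  name: "calculator",
  description: "Add two numbers.",
  parameters: zodSchema(z.object({ a: z.number(), b: z.number() })),
  execute: async ({ a, b }) => a + b,
});

// 0.121.x: await executeTool(calculator, { a: 1, b: 2 });
// 0.122.0: a single params object.
const sum = await executeTool({ tool: calculator, args: { a: 1, b: 2 } });

// With fullResponse: true the result is { output, metadata },
// per the overloads above.
const { output, metadata } = await executeTool({
  tool: calculator,
  args: { a: 1, b: 2 },
  fullResponse: true,
});
```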

package/tool/execute-tool/executeTool.js CHANGED
@@ -7,13 +7,13 @@ import { getRun } from "../../core/getRun.js";
  import { startDurationMeasurement } from "../../util/DurationMeasurement.js";
  import { runSafe } from "../../util/runSafe.js";
  import { ToolExecutionError } from "../ToolExecutionError.js";
- export async function executeTool(// eslint-disable-line @typescript-eslint/no-explicit-any
- tool, args, options) {
- const fullResponse = await doExecuteTool(tool, args, options);
- return options?.fullResponse ? fullResponse : fullResponse.output;
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ export async function executeTool({ tool, args, fullResponse, ...options }) {
+ const callResponse = await doExecuteTool({ tool, args, ...options });
+ return fullResponse ? callResponse : callResponse.output;
  }
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
- async function doExecuteTool(tool, args, options) {
+ async function doExecuteTool({ tool, args, ...options }) {
  const run = await getRun(options?.run);
  const eventSource = new FunctionEventSource({
  observers: [

package/tool/execute-tool/safeExecuteToolCall.cjs CHANGED
@@ -11,7 +11,7 @@ async function safeExecuteToolCall(tool, toolCall, options) {
  toolCall,
  args: toolCall.args,
  ok: true,
- result: await (0, executeTool_js_1.executeTool)(tool, toolCall.args, options),
+ result: await (0, executeTool_js_1.executeTool)({ tool, args: toolCall.args, ...options }),
  };
  }
  catch (error) {

package/tool/execute-tool/safeExecuteToolCall.js CHANGED
@@ -8,7 +8,7 @@ export async function safeExecuteToolCall(tool, toolCall, options) {
  toolCall,
  args: toolCall.args,
  ok: true,
- result: await executeTool(tool, toolCall.args, options),
+ result: await executeTool({ tool, args: toolCall.args, ...options }),
  };
  }
  catch (error) {

package/tool/generate-tool-call/TextGenerationToolCallModel.cjs CHANGED
@@ -30,9 +30,11 @@ class TextGenerationToolCallModel {
  return this.model.settingsForEvent;
  }
  async doGenerateToolCall(tool, prompt, options) {
- const { rawResponse, text, metadata } = await (0, generateText_js_1.generateText)(this.model, this.format.createPrompt(prompt, tool), {
- ...options,
+ const { rawResponse, text, metadata } = await (0, generateText_js_1.generateText)({
+ model: this.model,
+ prompt: this.format.createPrompt(prompt, tool),
  fullResponse: true,
+ ...options,
  });
  try {
  return {

package/tool/generate-tool-call/TextGenerationToolCallModel.js CHANGED
@@ -27,9 +27,11 @@ export class TextGenerationToolCallModel {
  return this.model.settingsForEvent;
  }
  async doGenerateToolCall(tool, prompt, options) {
- const { rawResponse, text, metadata } = await generateText(this.model, this.format.createPrompt(prompt, tool), {
- ...options,
+ const { rawResponse, text, metadata } = await generateText({
+ model: this.model,
+ prompt: this.format.createPrompt(prompt, tool),
  fullResponse: true,
+ ...options,
  });
  try {
  return {
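
With every call taking one object, wrappers such as `TextGenerationToolCallModel` above can forward `FunctionOptions` with a plain spread. A sketch of the same pattern in user code (names hypothetical, root-level exports assumed):

```ts
import { generateText, OllamaCompletionModel } from "modelfusion";
import type { FunctionOptions } from "modelfusion";

const model = new OllamaCompletionModel({ model: "test-model" }).withTextPrompt();

// Forward run/observability options straight into the params object.
async function generateWithOptions(prompt: string, options?: FunctionOptions) {
  return generateText({ model, prompt, ...options });
}
```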