modelfusion 0.102.0 → 0.103.0

Files changed (49)
  1. package/CHANGELOG.md +20 -0
  2. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +1 -1
  3. package/model-function/generate-text/TextGenerationModel.cjs +7 -0
  4. package/model-function/generate-text/TextGenerationModel.d.ts +3 -1
  5. package/model-function/generate-text/TextGenerationModel.js +6 -1
  6. package/model-function/generate-text/TextGenerationResult.cjs +2 -0
  7. package/model-function/generate-text/TextGenerationResult.d.ts +11 -0
  8. package/model-function/generate-text/TextGenerationResult.js +1 -0
  9. package/model-function/generate-text/generateText.cjs +14 -9
  10. package/model-function/generate-text/generateText.d.ts +3 -0
  11. package/model-function/generate-text/generateText.js +14 -9
  12. package/model-function/generate-text/index.cjs +1 -0
  13. package/model-function/generate-text/index.d.ts +1 -0
  14. package/model-function/generate-text/index.js +1 -0
  15. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +23 -8
  16. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +6 -1
  17. package/model-provider/anthropic/AnthropicTextGenerationModel.js +24 -9
  18. package/model-provider/cohere/CohereTextGenerationModel.cjs +22 -6
  19. package/model-provider/cohere/CohereTextGenerationModel.d.ts +6 -1
  20. package/model-provider/cohere/CohereTextGenerationModel.js +22 -6
  21. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +2 -2
  22. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +2 -2
  23. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +9 -8
  24. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -5
  25. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +9 -8
  26. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +51 -51
  27. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +14 -11
  28. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +51 -51
  29. package/model-provider/mistral/MistralChatModel.cjs +19 -2
  30. package/model-provider/mistral/MistralChatModel.d.ts +6 -1
  31. package/model-provider/mistral/MistralChatModel.js +19 -2
  32. package/model-provider/ollama/OllamaChatModel.cjs +8 -3
  33. package/model-provider/ollama/OllamaChatModel.d.ts +4 -1
  34. package/model-provider/ollama/OllamaChatModel.js +8 -3
  35. package/model-provider/ollama/OllamaCompletionModel.cjs +8 -3
  36. package/model-provider/ollama/OllamaCompletionModel.d.ts +4 -1
  37. package/model-provider/ollama/OllamaCompletionModel.js +8 -3
  38. package/model-provider/openai/OpenAICompletionModel.cjs +20 -4
  39. package/model-provider/openai/OpenAICompletionModel.d.ts +6 -1
  40. package/model-provider/openai/OpenAICompletionModel.js +20 -4
  41. package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +19 -1
  42. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +6 -1
  43. package/model-provider/openai/chat/AbstractOpenAIChatModel.js +19 -1
  44. package/model-provider/openai/chat/OpenAIChatModel.cjs +2 -3
  45. package/model-provider/openai/chat/OpenAIChatModel.js +2 -3
  46. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +2 -3
  47. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +2 -3
  48. package/model-provider/stability/StabilityImageGenerationModel.d.ts +5 -5
  49. package/package.json +2 -2
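
Only a subset of the 49 files is reproduced below, but the hunks share one theme: every provider's doGenerateTexts now returns a textGenerationResults array (text plus a finish reason) instead of the previous texts: string[]. Reconstructed from the .d.ts hunks in this diff, the new types in TextGenerationResult.d.ts look roughly like the sketch below; the union lists only the values visible in this diff, and the actual file may contain further members:

    // Sketch reconstructed from the declaration hunks below; not a verbatim copy.
    export interface TextGenerationResult {
      text: string;
      finishReason: TextGenerationFinishReason;
    }

    // "stop": natural end or stop sequence hit; "length": token limit reached;
    // "content-filter": provider filtered the output; "tool-calls": the model
    // answered with tool/function calls; "unknown": provider gave no reason.
    export type TextGenerationFinishReason =
      | "stop"
      | "length"
      | "content-filter"
      | "tool-calls"
      | "unknown";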

package/model-provider/ollama/OllamaCompletionModel.js CHANGED
@@ -6,6 +6,7 @@ import { ZodSchema } from "../../core/schema/ZodSchema.js";
  import { safeParseJSON } from "../../core/schema/parseJSON.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationToolCallModel, } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
  import { TextGenerationToolCallsOrGenerateTextModel, } from "../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
@@ -87,8 +88,7 @@ export class OllamaCompletionModel extends AbstractModel {
  }
  get settingsForEvent() {
  const eventSettingProperties = [
- "maxGenerationTokens",
- "stopSequences",
+ ...textGenerationModelProperties,
  "contextWindowSize",
  "temperature",
  "mirostat",
@@ -118,7 +118,12 @@ export class OllamaCompletionModel extends AbstractModel {
  });
  return {
  response,
- texts: [response.response],
+ textGenerationResults: [
+ {
+ text: response.response,
+ finishReason: "unknown",
+ },
+ ],
  };
  }
  doStreamText(prompt, options) {
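
The settingsForEvent change above repeats across every provider in this diff: hand-maintained base setting names are replaced by spreading a shared textGenerationModelProperties constant exported from TextGenerationModel.js (the +7/+6 lines in TextGenerationModel.cjs/.js). The constant itself is not shown in this diff, so the following is a hypothetical reconstruction from the names the spread replaces:

    // Hypothetical: only the names deleted in the hunks ("maxGenerationTokens",
    // "stopSequences", "numberOfGenerations") are confirmed by this diff; the
    // real constant may list further shared TextGenerationModelSettings keys.
    export const textGenerationModelProperties = [
      "maxGenerationTokens",
      "stopSequences",
      "numberOfGenerations",
    ] as const;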

package/model-provider/openai/OpenAICompletionModel.cjs CHANGED
@@ -8,6 +8,7 @@ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
  const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
  const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+ const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
  const TextPromptTemplate_js_1 = require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs");
  const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
@@ -228,9 +229,7 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
  }
  get settingsForEvent() {
  const eventSettingProperties = [
- "maxGenerationTokens",
- "stopSequences",
- "numberOfGenerations",
+ ...TextGenerationModel_js_1.textGenerationModelProperties,
  "suffix",
  "temperature",
  "topP",
@@ -251,7 +250,12 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
  });
  return {
  response,
- texts: response.choices.map((choice) => choice.text),
+ textGenerationResults: response.choices.map((choice) => {
+ return {
+ finishReason: this.translateFinishReason(choice.finish_reason),
+ text: choice.text,
+ };
+ }),
  usage: {
  promptTokens: response.usage.prompt_tokens,
  completionTokens: response.usage.completion_tokens,
@@ -259,6 +263,18 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
  },
  };
  }
+ translateFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ return "stop";
+ case "length":
+ return "length";
+ case "content_filter":
+ return "content-filter";
+ default:
+ return "unknown";
+ }
+ }
  doStreamText(prompt, options) {
  return this.callAPI(prompt, {
  ...options,

package/model-provider/openai/OpenAICompletionModel.d.ts CHANGED
@@ -7,6 +7,7 @@ import { Delta } from "../../model-function/Delta.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
  import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+ import { TextGenerationFinishReason } from "../../model-function/generate-text/TextGenerationResult.js";
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  /**
  * @see https://platform.openai.com/docs/models/
@@ -162,13 +163,17 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
  }[];
  system_fingerprint?: string | undefined;
  };
- texts: string[];
+ textGenerationResults: {
+ finishReason: TextGenerationFinishReason;
+ text: string;
+ }[];
  usage: {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
  };
  }>;
+ private translateFinishReason;
  doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  /**
  * Returns this model with an instruction prompt template.

package/model-provider/openai/OpenAICompletionModel.js CHANGED
@@ -5,6 +5,7 @@ import { ZodSchema } from "../../core/schema/ZodSchema.js";
  import { parseJSON } from "../../core/schema/parseJSON.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
  import { chat, instruction, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
@@ -222,9 +223,7 @@ export class OpenAICompletionModel extends AbstractModel {
  }
  get settingsForEvent() {
  const eventSettingProperties = [
- "maxGenerationTokens",
- "stopSequences",
- "numberOfGenerations",
+ ...textGenerationModelProperties,
  "suffix",
  "temperature",
  "topP",
@@ -245,7 +244,12 @@ export class OpenAICompletionModel extends AbstractModel {
  });
  return {
  response,
- texts: response.choices.map((choice) => choice.text),
+ textGenerationResults: response.choices.map((choice) => {
+ return {
+ finishReason: this.translateFinishReason(choice.finish_reason),
+ text: choice.text,
+ };
+ }),
  usage: {
  promptTokens: response.usage.prompt_tokens,
  completionTokens: response.usage.completion_tokens,
@@ -253,6 +257,18 @@ export class OpenAICompletionModel extends AbstractModel {
  },
  };
  }
+ translateFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ return "stop";
+ case "length":
+ return "length";
+ case "content_filter":
+ return "content-filter";
+ default:
+ return "unknown";
+ }
+ }
  doStreamText(prompt, options) {
  return this.callAPI(prompt, {
  ...options,
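
With doGenerateTexts now returning finish reasons, the generateText changes listed above (+14 -9 in generateText.js) presumably surface them to callers. A hedged usage sketch; the fullResponse flag and the destructured property names follow modelfusion's existing full-response pattern and are assumptions, not confirmed by this diff:

    import { generateText, openai } from "modelfusion";

    const { text, finishReason } = await generateText(
      openai.CompletionTextGenerator({
        model: "gpt-3.5-turbo-instruct",
        maxGenerationTokens: 200,
      }),
      "Write a haiku about version bumps:",
      { fullResponse: true } // assumption: returns { text, finishReason, ... }
    );

    if (finishReason === "length") {
      // the completion was cut off by maxGenerationTokens
    }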

package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs CHANGED
@@ -80,10 +80,28 @@ class AbstractOpenAIChatModel extends AbstractModel_js_1.AbstractModel {
  });
  return {
  response,
- texts: response.choices.map((choice) => choice.message.content ?? ""),
+ textGenerationResults: response.choices.map((choice) => ({
+ text: choice.message.content ?? "",
+ finishReason: this.translateFinishReason(choice.finish_reason),
+ })),
  usage: this.extractUsage(response),
  };
  }
+ translateFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ return "stop";
+ case "length":
+ return "length";
+ case "content_filter":
+ return "content-filter";
+ case "function_call":
+ case "tool_calls":
+ return "tool-calls";
+ default:
+ return "unknown";
+ }
+ }
  doStreamText(prompt, options) {
  return this.callAPI(prompt, {
  ...options,

package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts CHANGED
@@ -5,6 +5,7 @@ import { ResponseHandler } from "../../../core/api/postToApi.js";
  import { AbstractModel } from "../../../model-function/AbstractModel.js";
  import { Delta } from "../../../model-function/Delta.js";
  import { TextGenerationModelSettings } from "../../../model-function/generate-text/TextGenerationModel.js";
+ import { TextGenerationFinishReason } from "../../../model-function/generate-text/TextGenerationResult.js";
  import { ToolDefinition } from "../../../tool/ToolDefinition.js";
  import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
  export interface AbstractOpenAIChatCallSettings {
@@ -126,13 +127,17 @@ export declare abstract class AbstractOpenAIChatModel<SETTINGS extends AbstractO
  }[];
  system_fingerprint?: string | null | undefined;
  };
- texts: string[];
+ textGenerationResults: {
+ text: string;
+ finishReason: TextGenerationFinishReason;
+ }[];
  usage: {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
  };
  }>;
+ private translateFinishReason;
  doStreamText(prompt: OpenAIChatPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  doGenerateToolCall(tool: ToolDefinition<string, unknown>, prompt: OpenAIChatPrompt, options?: FunctionOptions): Promise<{
  response: {

package/model-provider/openai/chat/AbstractOpenAIChatModel.js CHANGED
@@ -77,10 +77,28 @@ export class AbstractOpenAIChatModel extends AbstractModel {
  });
  return {
  response,
- texts: response.choices.map((choice) => choice.message.content ?? ""),
+ textGenerationResults: response.choices.map((choice) => ({
+ text: choice.message.content ?? "",
+ finishReason: this.translateFinishReason(choice.finish_reason),
+ })),
  usage: this.extractUsage(response),
  };
  }
+ translateFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ return "stop";
+ case "length":
+ return "length";
+ case "content_filter":
+ return "content-filter";
+ case "function_call":
+ case "tool_calls":
+ return "tool-calls";
+ default:
+ return "unknown";
+ }
+ }
  doStreamText(prompt, options) {
  return this.callAPI(prompt, {
  ...options,
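
The translateFinishReason bodies above are duplicated per provider and differ only in the tool-call cases that the chat models add. For illustration, here is the chat-model variant extracted as a standalone helper (hypothetical; the package keeps it as a private method on each model class):

    type FinishReason =
      | "stop" | "length" | "content-filter" | "tool-calls" | "unknown";

    // Maps OpenAI's snake_case finish_reason values onto modelfusion's
    // kebab-case reasons; anything unrecognized (including null) is "unknown".
    function translateOpenAIFinishReason(
      finishReason: string | null | undefined
    ): FinishReason {
      switch (finishReason) {
        case "stop":
          return "stop";
        case "length":
          return "length";
        case "content_filter":
          return "content-filter";
        case "function_call":
        case "tool_calls":
          return "tool-calls";
        default:
          return "unknown";
      }
    }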

package/model-provider/openai/chat/OpenAIChatModel.cjs CHANGED
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.OpenAIChatModel = exports.calculateOpenAIChatCostInMillicents = exports.isOpenAIChatModel = exports.getOpenAIChatModelInformation = exports.OPENAI_CHAT_MODELS = void 0;
  const StructureFromTextStreamingModel_js_1 = require("../../../model-function/generate-structure/StructureFromTextStreamingModel.cjs");
  const PromptTemplateTextStreamingModel_js_1 = require("../../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+ const TextGenerationModel_js_1 = require("../../../model-function/generate-text/TextGenerationModel.cjs");
  const TikTokenTokenizer_js_1 = require("../TikTokenTokenizer.cjs");
  const AbstractOpenAIChatModel_js_1 = require("./AbstractOpenAIChatModel.cjs");
  const OpenAIChatFunctionCallStructureGenerationModel_js_1 = require("./OpenAIChatFunctionCallStructureGenerationModel.cjs");
@@ -196,9 +197,7 @@ class OpenAIChatModel extends AbstractOpenAIChatModel_js_1.AbstractOpenAIChatMod
  }
  get settingsForEvent() {
  const eventSettingProperties = [
- "maxGenerationTokens",
- "stopSequences",
- "numberOfGenerations",
+ ...TextGenerationModel_js_1.textGenerationModelProperties,
  "functions",
  "functionCall",
  "temperature",

package/model-provider/openai/chat/OpenAIChatModel.js CHANGED
@@ -1,5 +1,6 @@
  import { StructureFromTextStreamingModel } from "../../../model-function/generate-structure/StructureFromTextStreamingModel.js";
  import { PromptTemplateTextStreamingModel } from "../../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { textGenerationModelProperties, } from "../../../model-function/generate-text/TextGenerationModel.js";
  import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
  import { AbstractOpenAIChatModel, } from "./AbstractOpenAIChatModel.js";
  import { OpenAIChatFunctionCallStructureGenerationModel } from "./OpenAIChatFunctionCallStructureGenerationModel.js";
@@ -190,9 +191,7 @@ export class OpenAIChatModel extends AbstractOpenAIChatModel {
  }
  get settingsForEvent() {
  const eventSettingProperties = [
- "maxGenerationTokens",
- "stopSequences",
- "numberOfGenerations",
+ ...textGenerationModelProperties,
  "functions",
  "functionCall",
  "temperature",

package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs CHANGED
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.OpenAICompatibleChatModel = void 0;
  const StructureFromTextStreamingModel_js_1 = require("../../model-function/generate-structure/StructureFromTextStreamingModel.cjs");
  const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+ const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
  const AbstractOpenAIChatModel_js_1 = require("../openai/chat/AbstractOpenAIChatModel.cjs");
  const OpenAIChatPromptTemplate_js_1 = require("../openai/chat/OpenAIChatPromptTemplate.cjs");
  /**
@@ -43,9 +44,7 @@ class OpenAICompatibleChatModel extends AbstractOpenAIChatModel_js_1.AbstractOpe
  }
  get settingsForEvent() {
  const eventSettingProperties = [
- "stopSequences",
- "maxGenerationTokens",
- "numberOfGenerations",
+ ...TextGenerationModel_js_1.textGenerationModelProperties,
  "functions",
  "functionCall",
  "temperature",

package/model-provider/openai-compatible/OpenAICompatibleChatModel.js CHANGED
@@ -1,5 +1,6 @@
  import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
  import { AbstractOpenAIChatModel, } from "../openai/chat/AbstractOpenAIChatModel.js";
  import { chat, instruction, text, } from "../openai/chat/OpenAIChatPromptTemplate.js";
  /**
@@ -40,9 +41,7 @@ export class OpenAICompatibleChatModel extends AbstractOpenAIChatModel {
  }
  get settingsForEvent() {
  const eventSettingProperties = [
- "stopSequences",
- "maxGenerationTokens",
- "numberOfGenerations",
+ ...textGenerationModelProperties,
  "functions",
  "functionCall",
  "temperature",

package/model-provider/stability/StabilityImageGenerationModel.d.ts CHANGED
@@ -69,9 +69,9 @@ export declare class StabilityImageGenerationModel extends AbstractModel<Stabili
  doGenerateImages(prompt: StabilityImageGenerationPrompt, options?: FunctionOptions): Promise<{
  response: {
  artifacts: {
+ finishReason: "ERROR" | "SUCCESS" | "CONTENT_FILTERED";
  base64: string;
  seed: number;
- finishReason: "SUCCESS" | "ERROR" | "CONTENT_FILTERED";
  }[];
  };
  base64Images: string[];
@@ -86,25 +86,25 @@ declare const stabilityImageGenerationResponseSchema: z.ZodObject<{
  seed: z.ZodNumber;
  finishReason: z.ZodEnum<["SUCCESS", "ERROR", "CONTENT_FILTERED"]>;
  }, "strip", z.ZodTypeAny, {
+ finishReason: "ERROR" | "SUCCESS" | "CONTENT_FILTERED";
  base64: string;
  seed: number;
- finishReason: "SUCCESS" | "ERROR" | "CONTENT_FILTERED";
  }, {
+ finishReason: "ERROR" | "SUCCESS" | "CONTENT_FILTERED";
  base64: string;
  seed: number;
- finishReason: "SUCCESS" | "ERROR" | "CONTENT_FILTERED";
  }>, "many">;
  }, "strip", z.ZodTypeAny, {
  artifacts: {
+ finishReason: "ERROR" | "SUCCESS" | "CONTENT_FILTERED";
  base64: string;
  seed: number;
- finishReason: "SUCCESS" | "ERROR" | "CONTENT_FILTERED";
  }[];
  }, {
  artifacts: {
+ finishReason: "ERROR" | "SUCCESS" | "CONTENT_FILTERED";
  base64: string;
  seed: number;
- finishReason: "SUCCESS" | "ERROR" | "CONTENT_FILTERED";
  }[];
  }>;
  export type StabilityImageGenerationResponse = z.infer<typeof stabilityImageGenerationResponseSchema>;
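
The two StabilityImageGenerationModel.d.ts hunks above change no behavior: the Zod schema is untouched, and only the property order in the compiler-inferred object types shifted when the declaration file was regenerated. In source form the schema reads roughly as follows (reconstructed from the ZodObject declaration above; comments are interpretive):

    import { z } from "zod";

    // Validates the Stability AI image generation response: an array of
    // artifacts, each carrying the image payload and a finish reason.
    const stabilityImageGenerationResponseSchema = z.object({
      artifacts: z.array(
        z.object({
          base64: z.string(), // image data, base64-encoded
          seed: z.number(),
          finishReason: z.enum(["SUCCESS", "ERROR", "CONTENT_FILTERED"]),
        })
      ),
    });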

package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "modelfusion",
  "description": "The TypeScript library for building multi-modal AI applications.",
- "version": "0.102.0",
+ "version": "0.103.0",
  "author": "Lars Grammel",
  "license": "MIT",
  "keywords": [
@@ -89,7 +89,7 @@
  "@vitest/coverage-v8": "^1.1.0",
  "@vitest/ui": "1.1.0",
  "eslint": "^8.45.0",
- "eslint-config-prettier": "9.0.0",
+ "eslint-config-prettier": "9.1.0",
  "fastify": "^4.0.0",
  "msw": "2.0.10"
  }