modelfusion 0.101.0 → 0.103.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. package/CHANGELOG.md +41 -0
  2. package/model-function/generate-image/ImageGenerationModel.d.ts +12 -2
  3. package/model-function/generate-image/PromptTemplateImageGenerationModel.cjs +3 -3
  4. package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +2 -2
  5. package/model-function/generate-image/PromptTemplateImageGenerationModel.js +3 -3
  6. package/model-function/generate-image/generateImage.cjs +9 -7
  7. package/model-function/generate-image/generateImage.d.ts +2 -0
  8. package/model-function/generate-image/generateImage.js +9 -7
  9. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +1 -1
  10. package/model-function/generate-text/TextGenerationModel.cjs +7 -0
  11. package/model-function/generate-text/TextGenerationModel.d.ts +3 -1
  12. package/model-function/generate-text/TextGenerationModel.js +6 -1
  13. package/model-function/generate-text/TextGenerationResult.cjs +2 -0
  14. package/model-function/generate-text/TextGenerationResult.d.ts +11 -0
  15. package/model-function/generate-text/TextGenerationResult.js +1 -0
  16. package/model-function/generate-text/generateText.cjs +14 -9
  17. package/model-function/generate-text/generateText.d.ts +3 -0
  18. package/model-function/generate-text/generateText.js +14 -9
  19. package/model-function/generate-text/index.cjs +1 -0
  20. package/model-function/generate-text/index.d.ts +1 -0
  21. package/model-function/generate-text/index.js +1 -0
  22. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +23 -8
  23. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +6 -1
  24. package/model-provider/anthropic/AnthropicTextGenerationModel.js +24 -9
  25. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +7 -5
  26. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +16 -2
  27. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +7 -5
  28. package/model-provider/automatic1111/Automatic1111ImageGenerationPrompt.d.ts +0 -1
  29. package/model-provider/cohere/CohereTextGenerationModel.cjs +22 -6
  30. package/model-provider/cohere/CohereTextGenerationModel.d.ts +6 -1
  31. package/model-provider/cohere/CohereTextGenerationModel.js +22 -6
  32. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +2 -2
  33. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +2 -2
  34. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +9 -8
  35. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -5
  36. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +9 -8
  37. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +51 -51
  38. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +20 -17
  39. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +51 -51
  40. package/model-provider/mistral/MistralChatModel.cjs +19 -2
  41. package/model-provider/mistral/MistralChatModel.d.ts +6 -1
  42. package/model-provider/mistral/MistralChatModel.js +19 -2
  43. package/model-provider/ollama/OllamaChatModel.cjs +8 -3
  44. package/model-provider/ollama/OllamaChatModel.d.ts +4 -1
  45. package/model-provider/ollama/OllamaChatModel.js +8 -3
  46. package/model-provider/ollama/OllamaCompletionModel.cjs +8 -3
  47. package/model-provider/ollama/OllamaCompletionModel.d.ts +4 -1
  48. package/model-provider/ollama/OllamaCompletionModel.js +8 -3
  49. package/model-provider/openai/OpenAICompletionModel.cjs +20 -4
  50. package/model-provider/openai/OpenAICompletionModel.d.ts +6 -1
  51. package/model-provider/openai/OpenAICompletionModel.js +20 -4
  52. package/model-provider/openai/OpenAIImageGenerationModel.cjs +25 -31
  53. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +2 -3
  54. package/model-provider/openai/OpenAIImageGenerationModel.js +25 -31
  55. package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +19 -1
  56. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +6 -1
  57. package/model-provider/openai/chat/AbstractOpenAIChatModel.js +19 -1
  58. package/model-provider/openai/chat/OpenAIChatModel.cjs +2 -3
  59. package/model-provider/openai/chat/OpenAIChatModel.js +2 -3
  60. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +2 -3
  61. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +2 -3
  62. package/model-provider/stability/StabilityApiConfiguration.cjs +12 -5
  63. package/model-provider/stability/StabilityApiConfiguration.d.ts +7 -8
  64. package/model-provider/stability/StabilityApiConfiguration.js +12 -5
  65. package/model-provider/stability/StabilityError.cjs +7 -31
  66. package/model-provider/stability/StabilityError.d.ts +2 -11
  67. package/model-provider/stability/StabilityError.js +6 -28
  68. package/model-provider/stability/StabilityFacade.cjs +11 -3
  69. package/model-provider/stability/StabilityFacade.d.ts +10 -2
  70. package/model-provider/stability/StabilityFacade.js +9 -2
  71. package/model-provider/stability/StabilityImageGenerationModel.cjs +39 -50
  72. package/model-provider/stability/StabilityImageGenerationModel.d.ts +42 -27
  73. package/model-provider/stability/StabilityImageGenerationModel.js +39 -50
  74. package/model-provider/stability/index.cjs +1 -3
  75. package/model-provider/stability/index.d.ts +1 -1
  76. package/model-provider/stability/index.js +0 -1
  77. package/package.json +2 -2
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js

@@ -4,6 +4,7 @@ import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postTo
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
  import { parseJSON } from "../../core/schema/parseJSON.js";
  import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
@@ -34,25 +35,52 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
          return this.settings.contextWindowSize;
      }
      async callAPI(prompt, options) {
+         const api = this.settings.api ?? new LlamaCppApiConfiguration();
+         const responseFormat = options.responseFormat;
+         const abortSignal = options.run?.abortSignal;
          return callWithRetryAndThrottle({
-             retry: this.settings.api?.retry,
-             throttle: this.settings.api?.throttle,
-             call: async () => callLlamaCppTextGenerationAPI({
-                 ...this.settings,
-                 // mapping
-                 nPredict: this.settings.maxGenerationTokens,
-                 stop: this.settings.stopSequences,
-                 // other
-                 abortSignal: options.run?.abortSignal,
-                 prompt,
-                 responseFormat: options.responseFormat,
+             retry: api.retry,
+             throttle: api.throttle,
+             call: async () => postJsonToApi({
+                 url: api.assembleUrl(`/completion`),
+                 headers: api.headers,
+                 body: {
+                     stream: responseFormat.stream,
+                     prompt: prompt.text,
+                     image_data: prompt.images != null
+                         ? Object.entries(prompt.images).map(([id, data]) => ({
+                             id: +id,
+                             data,
+                         }))
+                         : undefined,
+                     cache_prompt: this.settings.cachePrompt,
+                     temperature: this.settings.temperature,
+                     top_k: this.settings.topK,
+                     top_p: this.settings.topP,
+                     n_predict: this.settings.maxGenerationTokens,
+                     n_keep: this.settings.nKeep,
+                     stop: this.settings.stopSequences,
+                     tfs_z: this.settings.tfsZ,
+                     typical_p: this.settings.typicalP,
+                     repeat_penalty: this.settings.repeatPenalty,
+                     repeat_last_n: this.settings.repeatLastN,
+                     penalize_nl: this.settings.penalizeNl,
+                     mirostat: this.settings.mirostat,
+                     mirostat_tau: this.settings.mirostatTau,
+                     mirostat_eta: this.settings.mirostatEta,
+                     seed: this.settings.seed,
+                     ignore_eos: this.settings.ignoreEos,
+                     logit_bias: this.settings.logitBias,
+                 },
+                 failedResponseHandler: failedLlamaCppCallResponseHandler,
+                 successfulResponseHandler: responseFormat.handler,
+                 abortSignal,
              }),
          });
      }
      get settingsForEvent() {
          const eventSettingProperties = [
-             "maxGenerationTokens",
-             "stopSequences",
+             ...textGenerationModelProperties,
              "contextWindowSize",
              "cachePrompt",
              "temperature",
@@ -84,7 +112,16 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
          });
          return {
              response,
-             texts: [response.content],
+             textGenerationResults: [
+                 {
+                     text: response.content,
+                     finishReason: response.stopped_eos || response.stopped_word
+                         ? "stop"
+                         : response.stopped_limit
+                             ? "length"
+                             : "unknown",
+                 },
+             ],
              usage: {
                  promptTokens: response.tokens_evaluated,
                  completionTokens: response.tokens_predicted,
@@ -194,43 +231,6 @@ const llamaCppTextStreamingResponseSchema = new ZodSchema(z.discriminatedUnion("
      }),
      llamaCppTextGenerationResponseSchema,
  ]));
- async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguration(), abortSignal, responseFormat, prompt, cachePrompt, temperature, topK, topP, nPredict, nKeep, stop, tfsZ, typicalP, repeatPenalty, repeatLastN, penalizeNl, mirostat, mirostatTau, mirostatEta, seed, ignoreEos, logitBias, }) {
-     return postJsonToApi({
-         url: api.assembleUrl(`/completion`),
-         headers: api.headers,
-         body: {
-             stream: responseFormat.stream,
-             prompt: prompt.text,
-             cache_prompt: cachePrompt,
-             temperature,
-             top_k: topK,
-             top_p: topP,
-             n_predict: nPredict,
-             n_keep: nKeep,
-             stop,
-             tfs_z: tfsZ,
-             typical_p: typicalP,
-             repeat_penalty: repeatPenalty,
-             repeat_last_n: repeatLastN,
-             penalize_nl: penalizeNl,
-             mirostat,
-             mirostat_tau: mirostatTau,
-             mirostat_eta: mirostatEta,
-             seed,
-             ignore_eos: ignoreEos,
-             logit_bias: logitBias,
-             image_data: prompt.images != null
-                 ? Object.entries(prompt.images).map(([id, data]) => ({
-                     id: +id,
-                     data,
-                 }))
-                 : undefined,
-         },
-         failedResponseHandler: failedLlamaCppCallResponseHandler,
-         successfulResponseHandler: responseFormat.handler,
-         abortSignal,
-     });
- }
  async function createLlamaCppFullDeltaIterableQueue(stream) {
      const queue = new AsyncQueue();
      let content = "";
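
With `doGenerateTexts` now mapping llama.cpp's `stopped_eos` / `stopped_word` / `stopped_limit` flags to a finish reason, callers can see why generation ended. A minimal caller-side sketch, assuming the `generateText` signature and `fullResponse` option of this release line, plus the `llamacpp.TextGenerator` facade with `withTextPrompt()` from 0.101.x:

```ts
import { generateText, llamacpp } from "modelfusion";

// Sketch: read the new finishReason alongside the generated text.
// "stop" | "length" | "unknown" match the mapping in the hunk above.
const { text, finishReason } = await generateText(
  llamacpp
    .TextGenerator({ maxGenerationTokens: 256 })
    .withTextPrompt(),
  "Invent a new holiday and describe its customs.",
  { fullResponse: true }
);

if (finishReason === "length") {
  // Generation was truncated by maxGenerationTokens (stopped_limit upstream).
}
```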
package/model-provider/mistral/MistralChatModel.cjs

@@ -75,6 +75,9 @@ class MistralChatModel extends AbstractModel_js_1.AbstractModel {
      get settingsForEvent() {
          const eventSettingProperties = [
              "maxGenerationTokens",
+             "stopSequences",
+             "numberOfGenerations",
+             "trimWhitespace",
              "temperature",
              "topP",
              "safeMode",
@@ -89,9 +92,23 @@ class MistralChatModel extends AbstractModel_js_1.AbstractModel {
          });
          return {
              response,
-             texts: response.choices.map((choice) => choice.message.content),
+             textGenerationResults: response.choices.map((choice) => ({
+                 text: choice.message.content,
+                 finishReason: this.translateFinishReason(choice.finish_reason),
+             })),
          };
      }
+     translateFinishReason(finishReason) {
+         switch (finishReason) {
+             case "stop":
+                 return "stop";
+             case "length":
+             case "model_length":
+                 return "length";
+             default:
+                 return "unknown";
+         }
+     }
      doStreamText(prompt, options) {
          return this.callAPI(prompt, {
              ...options,
@@ -118,7 +135,7 @@ class MistralChatModel extends AbstractModel_js_1.AbstractModel {
      }
      withPromptTemplate(promptTemplate) {
          return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
-             model: this,
+             model: this, // stop tokens are not supported by this model
              promptTemplate,
          });
      }
package/model-provider/mistral/MistralChatModel.d.ts

@@ -7,6 +7,7 @@ import { Delta } from "../../model-function/Delta.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
  import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+ import { TextGenerationFinishReason } from "../../model-function/generate-text/TextGenerationResult.js";
  export type MistralChatMessage = {
      role: "system" | "user" | "assistant";
      content: string;
@@ -78,8 +79,12 @@ export declare class MistralChatModel extends AbstractModel<MistralChatModelSett
              index: number;
          }[];
      };
-     texts: string[];
+     textGenerationResults: {
+         text: string;
+         finishReason: TextGenerationFinishReason;
+     }[];
  }>;
+ private translateFinishReason;
  doStreamText(prompt: MistralChatPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  /**
   * Returns this model with a text prompt template.
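
The `TextGenerationFinishReason` union imported above lives in the new TextGenerationResult.d.ts (file 14, +11 lines), which this excerpt does not show. Only `"stop"`, `"length"`, `"content-filter"`, and `"unknown"` appear in this diff's mappings; the sketch below fills in the remaining members from memory of the library source, so treat them as assumptions:

```ts
// Sketch of model-function/generate-text/TextGenerationResult.d.ts.
// The first three members and "unknown" are confirmed by the
// translateFinishReason mappings in this diff; the rest are assumptions.
export type TextGenerationFinishReason =
  | "stop" // a stop sequence or EOS token ended the generation
  | "length" // the token limit was reached
  | "content-filter" // the provider filtered the output
  | "tool-calls"
  | "error"
  | "other"
  | "unknown";

export interface TextGenerationResult {
  text: string;
  finishReason: TextGenerationFinishReason;
}
```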
package/model-provider/mistral/MistralChatModel.js

@@ -72,6 +72,9 @@ export class MistralChatModel extends AbstractModel {
      get settingsForEvent() {
          const eventSettingProperties = [
              "maxGenerationTokens",
+             "stopSequences",
+             "numberOfGenerations",
+             "trimWhitespace",
              "temperature",
              "topP",
              "safeMode",
@@ -86,9 +89,23 @@ export class MistralChatModel extends AbstractModel {
          });
          return {
              response,
-             texts: response.choices.map((choice) => choice.message.content),
+             textGenerationResults: response.choices.map((choice) => ({
+                 text: choice.message.content,
+                 finishReason: this.translateFinishReason(choice.finish_reason),
+             })),
          };
      }
+     translateFinishReason(finishReason) {
+         switch (finishReason) {
+             case "stop":
+                 return "stop";
+             case "length":
+             case "model_length":
+                 return "length";
+             default:
+                 return "unknown";
+         }
+     }
      doStreamText(prompt, options) {
          return this.callAPI(prompt, {
              ...options,
@@ -115,7 +132,7 @@ export class MistralChatModel extends AbstractModel {
      }
      withPromptTemplate(promptTemplate) {
          return new PromptTemplateTextStreamingModel({
-             model: this,
+             model: this, // stop tokens are not supported by this model
              promptTemplate,
          });
      }
package/model-provider/ollama/OllamaChatModel.cjs

@@ -9,6 +9,7 @@ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
  const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
  const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+ const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
  const TextGenerationToolCallModel_js_1 = require("../../tool/generate-tool-call/TextGenerationToolCallModel.cjs");
  const TextGenerationToolCallsOrGenerateTextModel_js_1 = require("../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs");
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
@@ -92,8 +93,7 @@ class OllamaChatModel extends AbstractModel_js_1.AbstractModel {
      }
      get settingsForEvent() {
          const eventSettingProperties = [
-             "maxGenerationTokens",
-             "stopSequences",
+             ...TextGenerationModel_js_1.textGenerationModelProperties,
              "temperature",
              "mirostat",
              "mirostatEta",
@@ -119,7 +119,12 @@ class OllamaChatModel extends AbstractModel_js_1.AbstractModel {
          });
          return {
              response,
-             texts: [response.message.content],
+             textGenerationResults: [
+                 {
+                     text: response.message.content,
+                     finishReason: "unknown",
+                 },
+             ],
          };
      }
      doStreamText(prompt, options) {
package/model-provider/ollama/OllamaChatModel.d.ts

@@ -52,7 +52,10 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
          load_duration?: number | undefined;
          prompt_eval_duration?: number | undefined;
      };
-     texts: string[];
+     textGenerationResults: {
+         text: string;
+         finishReason: "unknown";
+     }[];
  }>;
  doStreamText(prompt: OllamaChatPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, OllamaChatPrompt>): TextGenerationToolCallModel<INPUT_PROMPT, OllamaChatPrompt, this>;
package/model-provider/ollama/OllamaChatModel.js

@@ -6,6 +6,7 @@ import { ZodSchema } from "../../core/schema/ZodSchema.js";
  import { safeParseJSON } from "../../core/schema/parseJSON.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationToolCallModel, } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
  import { TextGenerationToolCallsOrGenerateTextModel, } from "../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
@@ -89,8 +90,7 @@ export class OllamaChatModel extends AbstractModel {
      }
      get settingsForEvent() {
          const eventSettingProperties = [
-             "maxGenerationTokens",
-             "stopSequences",
+             ...textGenerationModelProperties,
              "temperature",
              "mirostat",
              "mirostatEta",
@@ -116,7 +116,12 @@ export class OllamaChatModel extends AbstractModel {
          });
          return {
              response,
-             texts: [response.message.content],
+             textGenerationResults: [
+                 {
+                     text: response.message.content,
+                     finishReason: "unknown",
+                 },
+             ],
          };
      }
      doStreamText(prompt, options) {
package/model-provider/ollama/OllamaCompletionModel.cjs

@@ -9,6 +9,7 @@ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
  const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
  const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+ const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
  const TextGenerationToolCallModel_js_1 = require("../../tool/generate-tool-call/TextGenerationToolCallModel.cjs");
  const TextGenerationToolCallsOrGenerateTextModel_js_1 = require("../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs");
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
@@ -90,8 +91,7 @@ class OllamaCompletionModel extends AbstractModel_js_1.AbstractModel {
      }
      get settingsForEvent() {
          const eventSettingProperties = [
-             "maxGenerationTokens",
-             "stopSequences",
+             ...TextGenerationModel_js_1.textGenerationModelProperties,
              "contextWindowSize",
              "temperature",
              "mirostat",
@@ -121,7 +121,12 @@ class OllamaCompletionModel extends AbstractModel_js_1.AbstractModel {
          });
          return {
              response,
-             texts: [response.response],
+             textGenerationResults: [
+                 {
+                     text: response.response,
+                     finishReason: "unknown",
+                 },
+             ],
          };
      }
      doStreamText(prompt, options) {
package/model-provider/ollama/OllamaCompletionModel.d.ts

@@ -65,7 +65,10 @@ export declare class OllamaCompletionModel<CONTEXT_WINDOW_SIZE extends number |
          prompt_eval_duration?: number | undefined;
          context?: number[] | undefined;
      };
-     texts: string[];
+     textGenerationResults: {
+         text: string;
+         finishReason: "unknown";
+     }[];
  }>;
  doStreamText(prompt: OllamaCompletionPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, OllamaCompletionPrompt>): TextGenerationToolCallModel<INPUT_PROMPT, OllamaCompletionPrompt, this>;
package/model-provider/ollama/OllamaCompletionModel.js

@@ -6,6 +6,7 @@ import { ZodSchema } from "../../core/schema/ZodSchema.js";
  import { safeParseJSON } from "../../core/schema/parseJSON.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationToolCallModel, } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
  import { TextGenerationToolCallsOrGenerateTextModel, } from "../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
@@ -87,8 +88,7 @@ export class OllamaCompletionModel extends AbstractModel {
      }
      get settingsForEvent() {
          const eventSettingProperties = [
-             "maxGenerationTokens",
-             "stopSequences",
+             ...textGenerationModelProperties,
              "contextWindowSize",
              "temperature",
              "mirostat",
@@ -118,7 +118,12 @@ export class OllamaCompletionModel extends AbstractModel {
          });
          return {
              response,
-             texts: [response.response],
+             textGenerationResults: [
+                 {
+                     text: response.response,
+                     finishReason: "unknown",
+                 },
+             ],
          };
      }
      doStreamText(prompt, options) {
package/model-provider/openai/OpenAICompletionModel.cjs

@@ -8,6 +8,7 @@ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
  const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
  const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+ const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
  const TextPromptTemplate_js_1 = require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs");
  const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
@@ -228,9 +229,7 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
      }
      get settingsForEvent() {
          const eventSettingProperties = [
-             "maxGenerationTokens",
-             "stopSequences",
-             "numberOfGenerations",
+             ...TextGenerationModel_js_1.textGenerationModelProperties,
              "suffix",
              "temperature",
              "topP",
@@ -251,7 +250,12 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
          });
          return {
              response,
-             texts: response.choices.map((choice) => choice.text),
+             textGenerationResults: response.choices.map((choice) => {
+                 return {
+                     finishReason: this.translateFinishReason(choice.finish_reason),
+                     text: choice.text,
+                 };
+             }),
              usage: {
                  promptTokens: response.usage.prompt_tokens,
                  completionTokens: response.usage.completion_tokens,
@@ -259,6 +263,18 @@
              },
          };
      }
+     translateFinishReason(finishReason) {
+         switch (finishReason) {
+             case "stop":
+                 return "stop";
+             case "length":
+                 return "length";
+             case "content_filter":
+                 return "content-filter";
+             default:
+                 return "unknown";
+         }
+     }
      doStreamText(prompt, options) {
          return this.callAPI(prompt, {
              ...options,
package/model-provider/openai/OpenAICompletionModel.d.ts

@@ -7,6 +7,7 @@ import { Delta } from "../../model-function/Delta.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
  import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+ import { TextGenerationFinishReason } from "../../model-function/generate-text/TextGenerationResult.js";
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  /**
   * @see https://platform.openai.com/docs/models/
@@ -162,13 +163,17 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
          }[];
          system_fingerprint?: string | undefined;
      };
-     texts: string[];
+     textGenerationResults: {
+         finishReason: TextGenerationFinishReason;
+         text: string;
+     }[];
      usage: {
          promptTokens: number;
          completionTokens: number;
          totalTokens: number;
      };
  }>;
+ private translateFinishReason;
  doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  /**
   * Returns this model with an instruction prompt template.
package/model-provider/openai/OpenAICompletionModel.js

@@ -5,6 +5,7 @@ import { ZodSchema } from "../../core/schema/ZodSchema.js";
  import { parseJSON } from "../../core/schema/parseJSON.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
  import { chat, instruction, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
@@ -222,9 +223,7 @@ export class OpenAICompletionModel extends AbstractModel {
      }
      get settingsForEvent() {
          const eventSettingProperties = [
-             "maxGenerationTokens",
-             "stopSequences",
-             "numberOfGenerations",
+             ...textGenerationModelProperties,
              "suffix",
              "temperature",
              "topP",
@@ -245,7 +244,12 @@ export class OpenAICompletionModel extends AbstractModel {
          });
          return {
              response,
-             texts: response.choices.map((choice) => choice.text),
+             textGenerationResults: response.choices.map((choice) => {
+                 return {
+                     finishReason: this.translateFinishReason(choice.finish_reason),
+                     text: choice.text,
+                 };
+             }),
              usage: {
                  promptTokens: response.usage.prompt_tokens,
                  completionTokens: response.usage.completion_tokens,
@@ -253,6 +257,18 @@
              },
          };
      }
+     translateFinishReason(finishReason) {
+         switch (finishReason) {
+             case "stop":
+                 return "stop";
+             case "length":
+                 return "length";
+             case "content_filter":
+                 return "content-filter";
+             default:
+                 return "unknown";
+         }
+     }
      doStreamText(prompt, options) {
          return this.callAPI(prompt, {
              ...options,
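
Because every provider now returns `textGenerationResults`, multi-candidate calls expose a finish reason per candidate. A hedged sketch using the OpenAI completion facade (`openai.CompletionTextGenerator` as in 0.101.x; the model name and the `textGenerationResults` property on the full response are assumptions based on this diff and the CHANGELOG):

```ts
import { generateText, openai } from "modelfusion";

// Sketch: request several candidates and inspect each finish reason.
const { textGenerationResults } = await generateText(
  openai.CompletionTextGenerator({
    model: "gpt-3.5-turbo-instruct",
    numberOfGenerations: 3,
    maxGenerationTokens: 64,
  }),
  "Write a tagline for a coffee shop:",
  { fullResponse: true }
);

for (const { text, finishReason } of textGenerationResults) {
  // finishReason here is "stop", "length", "content-filter", or "unknown".
  console.log(finishReason, text);
}
```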
package/model-provider/openai/OpenAIImageGenerationModel.cjs

@@ -60,7 +60,7 @@ const calculateOpenAIImageGenerationCostInMillicents = ({ model, settings, }) =>
      if (cost == null) {
          return null;
      }
-     return (settings.n ?? 1) * cost;
+     return (settings.numberOfGenerations ?? 1) * cost;
  };
  exports.calculateOpenAIImageGenerationCostInMillicents = calculateOpenAIImageGenerationCostInMillicents;
  /**
@@ -88,38 +88,48 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
          return this.settings.model;
      }
      async callAPI(prompt, options) {
-         const run = options?.run;
+         const api = this.settings.api ?? new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration();
+         const abortSignal = options?.run?.abortSignal;
+         const userId = options?.run?.userId;
          const responseFormat = options?.responseFormat;
-         const callSettings = {
-             ...this.settings,
-             user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
-             abortSignal: run?.abortSignal,
-             responseFormat,
-             prompt,
-         };
          return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
-             retry: callSettings.api?.retry,
-             throttle: callSettings.api?.throttle,
-             call: async () => callOpenAIImageGenerationAPI(callSettings),
+             retry: api.retry,
+             throttle: api.throttle,
+             call: async () => {
+                 return (0, postToApi_js_1.postJsonToApi)({
+                     url: api.assembleUrl("/images/generations"),
+                     headers: api.headers,
+                     body: {
+                         prompt,
+                         n: this.settings.numberOfGenerations,
+                         size: this.settings.size,
+                         response_format: responseFormat.type,
+                         user: this.settings.isUserIdForwardingEnabled ? userId : undefined,
+                     },
+                     failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
+                     successfulResponseHandler: responseFormat?.handler,
+                     abortSignal,
+                 });
+             },
          });
      }
      get settingsForEvent() {
          const eventSettingProperties = [
-             "n",
+             "numberOfGenerations",
              "size",
              "quality",
              "style",
          ];
          return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
      }
-     async doGenerateImage(prompt, options) {
+     async doGenerateImages(prompt, options) {
          const response = await this.callAPI(prompt, {
              responseFormat: exports.OpenAIImageGenerationResponseFormat.base64Json,
              ...options,
          });
          return {
              response,
-             base64Image: response.data[0].b64_json,
+             base64Images: response.data.map((item) => item.b64_json),
          };
      }
      withPromptTemplate(promptTemplate) {
@@ -155,19 +165,3 @@ exports.OpenAIImageGenerationResponseFormat = {
          handler: (0, postToApi_js_1.createJsonResponseHandler)(openAIImageGenerationBase64JsonSchema),
      },
  };
- async function callOpenAIImageGenerationAPI({ api = new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration(), abortSignal, prompt, n, size, responseFormat, user, }) {
-     return (0, postToApi_js_1.postJsonToApi)({
-         url: api.assembleUrl("/images/generations"),
-         headers: api.headers,
-         body: {
-             prompt,
-             n,
-             size,
-             response_format: responseFormat.type,
-             user,
-         },
-         failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
-         successfulResponseHandler: responseFormat?.handler,
-         abortSignal,
-     });
- }
package/model-provider/openai/OpenAIImageGenerationModel.d.ts

@@ -24,7 +24,6 @@ export declare const calculateOpenAIImageGenerationCostInMillicents: ({ model, s
  export type OpenAIImageModelType = keyof typeof OPENAI_IMAGE_MODELS;
  export interface OpenAIImageGenerationCallSettings {
      model: OpenAIImageModelType;
-     n?: number;
      size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792";
      quality?: "standard" | "hd";
      style?: "vivid" | "natural";
@@ -52,14 +51,14 @@ export declare class OpenAIImageGenerationModel extends AbstractModel<OpenAIImag
          responseFormat: OpenAIImageGenerationResponseFormatType<RESULT>;
      } & FunctionOptions): Promise<RESULT>;
      get settingsForEvent(): Partial<OpenAIImageGenerationSettings>;
-     doGenerateImage(prompt: string, options?: FunctionOptions): Promise<{
+     doGenerateImages(prompt: string, options?: FunctionOptions): Promise<{
          response: {
              data: {
                  b64_json: string;
              }[];
              created: number;
          };
-         base64Image: string;
+         base64Images: string[];
      }>;
      withPromptTemplate<INPUT_PROMPT>(promptTemplate: PromptTemplate<INPUT_PROMPT, string>): PromptTemplateImageGenerationModel<INPUT_PROMPT, string, OpenAIImageGenerationSettings, this>;
      withSettings(additionalSettings: Partial<OpenAIImageGenerationSettings>): this;
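
The `n` → `numberOfGenerations` rename and the plural `doGenerateImages` / `base64Images` shape line up with the multi-image support added in 0.102.0. A caller-side sketch, assuming the `openai.ImageGenerator` facade and an `images` property on the `generateImage` full response as described in that CHANGELOG:

```ts
import { generateImage, openai } from "modelfusion";

// Sketch: generate two images in one call and read both from the
// full response. Property names are assumptions per the lead-in.
const { images } = await generateImage(
  openai.ImageGenerator({
    model: "dall-e-2", // dall-e-3 is limited to a single generation
    numberOfGenerations: 2,
    size: "512x512",
  }),
  "the wicked witch of the west in the style of early 19th century painting",
  { fullResponse: true }
);
```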