modelfusion 0.102.0 → 0.104.0

This diff shows the content of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (97)
  1. package/CHANGELOG.md +27 -0
  2. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +1 -1
  3. package/model-function/generate-text/TextGenerationModel.cjs +7 -0
  4. package/model-function/generate-text/TextGenerationModel.d.ts +3 -1
  5. package/model-function/generate-text/TextGenerationModel.js +6 -1
  6. package/model-function/generate-text/TextGenerationResult.cjs +2 -0
  7. package/model-function/generate-text/TextGenerationResult.d.ts +11 -0
  8. package/model-function/generate-text/TextGenerationResult.js +1 -0
  9. package/model-function/generate-text/generateText.cjs +14 -9
  10. package/model-function/generate-text/generateText.d.ts +3 -0
  11. package/model-function/generate-text/generateText.js +14 -9
  12. package/model-function/generate-text/index.cjs +1 -0
  13. package/model-function/generate-text/index.d.ts +1 -0
  14. package/model-function/generate-text/index.js +1 -0
  15. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +2 -1
  16. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +2 -2
  17. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +2 -1
  18. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +5 -4
  19. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts +4 -4
  20. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +5 -4
  21. package/model-function/generate-text/prompt-template/ChatPrompt.cjs +0 -24
  22. package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +11 -34
  23. package/model-function/generate-text/prompt-template/ChatPrompt.js +1 -22
  24. package/model-function/generate-text/prompt-template/Content.cjs +9 -0
  25. package/model-function/generate-text/prompt-template/Content.d.ts +9 -4
  26. package/model-function/generate-text/prompt-template/Content.js +7 -1
  27. package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +6 -22
  28. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +36 -5
  29. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +16 -4
  30. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +34 -4
  31. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +5 -4
  32. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts +4 -4
  33. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +5 -4
  34. package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +3 -4
  35. package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts +4 -4
  36. package/model-function/generate-text/prompt-template/TextPromptTemplate.js +3 -4
  37. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +3 -3
  38. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +2 -2
  39. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +3 -3
  40. package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +0 -2
  41. package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts +4 -4
  42. package/model-function/generate-text/prompt-template/trimChatPrompt.js +0 -2
  43. package/model-provider/anthropic/AnthropicPromptTemplate.cjs +5 -4
  44. package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +4 -4
  45. package/model-provider/anthropic/AnthropicPromptTemplate.js +5 -4
  46. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +23 -8
  47. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +8 -3
  48. package/model-provider/anthropic/AnthropicTextGenerationModel.js +24 -9
  49. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
  50. package/model-provider/cohere/CohereTextGenerationModel.cjs +22 -6
  51. package/model-provider/cohere/CohereTextGenerationModel.d.ts +8 -3
  52. package/model-provider/cohere/CohereTextGenerationModel.js +22 -6
  53. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +2 -2
  54. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +2 -2
  55. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +9 -8
  56. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -5
  57. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +9 -8
  58. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +23 -16
  59. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -4
  60. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +23 -16
  61. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +51 -51
  62. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +14 -11
  63. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +51 -51
  64. package/model-provider/mistral/MistralChatModel.cjs +19 -2
  65. package/model-provider/mistral/MistralChatModel.d.ts +8 -3
  66. package/model-provider/mistral/MistralChatModel.js +19 -2
  67. package/model-provider/mistral/MistralPromptTemplate.cjs +5 -4
  68. package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
  69. package/model-provider/mistral/MistralPromptTemplate.js +5 -4
  70. package/model-provider/ollama/OllamaChatModel.cjs +8 -3
  71. package/model-provider/ollama/OllamaChatModel.d.ts +6 -3
  72. package/model-provider/ollama/OllamaChatModel.js +8 -3
  73. package/model-provider/ollama/OllamaChatPromptTemplate.cjs +9 -13
  74. package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +4 -4
  75. package/model-provider/ollama/OllamaChatPromptTemplate.js +9 -13
  76. package/model-provider/ollama/OllamaCompletionModel.cjs +8 -3
  77. package/model-provider/ollama/OllamaCompletionModel.d.ts +4 -1
  78. package/model-provider/ollama/OllamaCompletionModel.js +8 -3
  79. package/model-provider/openai/OpenAICompletionModel.cjs +20 -4
  80. package/model-provider/openai/OpenAICompletionModel.d.ts +8 -3
  81. package/model-provider/openai/OpenAICompletionModel.js +20 -4
  82. package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +19 -1
  83. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +6 -1
  84. package/model-provider/openai/chat/AbstractOpenAIChatModel.js +19 -1
  85. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +2 -2
  86. package/model-provider/openai/chat/OpenAIChatMessage.d.ts +2 -2
  87. package/model-provider/openai/chat/OpenAIChatModel.cjs +2 -3
  88. package/model-provider/openai/chat/OpenAIChatModel.d.ts +2 -2
  89. package/model-provider/openai/chat/OpenAIChatModel.js +2 -3
  90. package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs +0 -2
  91. package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +4 -4
  92. package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +0 -2
  93. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +2 -3
  94. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +2 -2
  95. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +2 -3
  96. package/model-provider/stability/StabilityImageGenerationModel.d.ts +6 -6
  97. package/package.json +2 -2
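
The recurring change across the hunks below is that each provider's doGenerateTexts now returns textGenerationResults (generated text plus a finish reason) instead of a bare texts array, and the separate text/multi-modal prompt types are merged into single InstructionPrompt/ChatPrompt types. A rough usage sketch of how the finish reason is presumably surfaced to callers; the llamacpp.TextGenerator facade, withTextPrompt(), the positional generateText signature, and the fullResponse result shape are assumptions about the modelfusion API of this era, not taken from the diff itself:

import { generateText, llamacpp } from "modelfusion";

// Hypothetical setup: a llama.cpp completion model mapped to a plain-text prompt.
const model = llamacpp
  .TextGenerator({ maxGenerationTokens: 128, temperature: 0.7 })
  .withTextPrompt();

// With fullResponse, the result is assumed to expose the new finish reason
// alongside the generated text.
const { text, finishReason } = await generateText(model, "Summarize this diff in one line.", {
  fullResponse: true,
});

console.log(text);
console.log(finishReason); // e.g. "stop" when generation ended on EOS or a stop sequence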

package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs

@@ -7,6 +7,7 @@ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
  const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
  const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+ const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
  const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
  const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
@@ -37,25 +38,52 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  return this.settings.contextWindowSize;
  }
  async callAPI(prompt, options) {
+ const api = this.settings.api ?? new LlamaCppApiConfiguration_js_1.LlamaCppApiConfiguration();
+ const responseFormat = options.responseFormat;
+ const abortSignal = options.run?.abortSignal;
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
- retry: this.settings.api?.retry,
- throttle: this.settings.api?.throttle,
- call: async () => callLlamaCppTextGenerationAPI({
- ...this.settings,
- // mapping
- nPredict: this.settings.maxGenerationTokens,
- stop: this.settings.stopSequences,
- // other
- abortSignal: options.run?.abortSignal,
- prompt,
- responseFormat: options.responseFormat,
+ retry: api.retry,
+ throttle: api.throttle,
+ call: async () => (0, postToApi_js_1.postJsonToApi)({
+ url: api.assembleUrl(`/completion`),
+ headers: api.headers,
+ body: {
+ stream: responseFormat.stream,
+ prompt: prompt.text,
+ image_data: prompt.images != null
+ ? Object.entries(prompt.images).map(([id, data]) => ({
+ id: +id,
+ data,
+ }))
+ : undefined,
+ cache_prompt: this.settings.cachePrompt,
+ temperature: this.settings.temperature,
+ top_k: this.settings.topK,
+ top_p: this.settings.topP,
+ n_predict: this.settings.maxGenerationTokens,
+ n_keep: this.settings.nKeep,
+ stop: this.settings.stopSequences,
+ tfs_z: this.settings.tfsZ,
+ typical_p: this.settings.typicalP,
+ repeat_penalty: this.settings.repeatPenalty,
+ repeat_last_n: this.settings.repeatLastN,
+ penalize_nl: this.settings.penalizeNl,
+ mirostat: this.settings.mirostat,
+ mirostat_tau: this.settings.mirostatTau,
+ mirostat_eta: this.settings.mirostatEta,
+ seed: this.settings.seed,
+ ignore_eos: this.settings.ignoreEos,
+ logit_bias: this.settings.logitBias,
+ },
+ failedResponseHandler: LlamaCppError_js_1.failedLlamaCppCallResponseHandler,
+ successfulResponseHandler: responseFormat.handler,
+ abortSignal,
  }),
  });
  }
  get settingsForEvent() {
  const eventSettingProperties = [
- "maxGenerationTokens",
- "stopSequences",
+ ...TextGenerationModel_js_1.textGenerationModelProperties,
  "contextWindowSize",
  "cachePrompt",
  "temperature",
@@ -87,7 +115,16 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  });
  return {
  response,
- texts: [response.content],
+ textGenerationResults: [
+ {
+ text: response.content,
+ finishReason: response.stopped_eos || response.stopped_word
+ ? "stop"
+ : response.stopped_limit
+ ? "length"
+ : "unknown",
+ },
+ ],
  usage: {
  promptTokens: response.tokens_evaluated,
  completionTokens: response.tokens_predicted,
@@ -198,43 +235,6 @@ const llamaCppTextStreamingResponseSchema = new ZodSchema_js_1.ZodSchema(zod_1.z
  }),
  llamaCppTextGenerationResponseSchema,
  ]));
- async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguration_js_1.LlamaCppApiConfiguration(), abortSignal, responseFormat, prompt, cachePrompt, temperature, topK, topP, nPredict, nKeep, stop, tfsZ, typicalP, repeatPenalty, repeatLastN, penalizeNl, mirostat, mirostatTau, mirostatEta, seed, ignoreEos, logitBias, }) {
- return (0, postToApi_js_1.postJsonToApi)({
- url: api.assembleUrl(`/completion`),
- headers: api.headers,
- body: {
- stream: responseFormat.stream,
- prompt: prompt.text,
- cache_prompt: cachePrompt,
- temperature,
- top_k: topK,
- top_p: topP,
- n_predict: nPredict,
- n_keep: nKeep,
- stop,
- tfs_z: tfsZ,
- typical_p: typicalP,
- repeat_penalty: repeatPenalty,
- repeat_last_n: repeatLastN,
- penalize_nl: penalizeNl,
- mirostat,
- mirostat_tau: mirostatTau,
- mirostat_eta: mirostatEta,
- seed,
- ignore_eos: ignoreEos,
- logit_bias: logitBias,
- image_data: prompt.images != null
- ? Object.entries(prompt.images).map(([id, data]) => ({
- id: +id,
- data,
- }))
- : undefined,
- },
- failedResponseHandler: LlamaCppError_js_1.failedLlamaCppCallResponseHandler,
- successfulResponseHandler: responseFormat.handler,
- abortSignal,
- });
- }
  async function createLlamaCppFullDeltaIterableQueue(stream) {
  const queue = new AsyncQueue_js_1.AsyncQueue();
  let content = "";
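
The doGenerateTexts hunk above replaces the old texts: [response.content] result with a textGenerationResults entry whose finish reason is derived from the llama.cpp stop flags. A self-contained TypeScript sketch of that mapping (the flag names come from the hunk; the narrow finish-reason union matches the declaration file below):

// Stop flags reported by the llama.cpp /completion response (names as used in the hunk above).
type LlamaCppStopFlags = {
  stopped_eos: boolean; // generation ended on the EOS token
  stopped_word: boolean; // generation ended on a configured stop sequence
  stopped_limit: boolean; // generation hit the n_predict token limit
};

// Narrow union as declared for this provider in LlamaCppTextGenerationModel.d.ts.
type LlamaCppFinishReason = "stop" | "length" | "unknown";

function mapLlamaCppFinishReason(flags: LlamaCppStopFlags): LlamaCppFinishReason {
  if (flags.stopped_eos || flags.stopped_word) return "stop";
  if (flags.stopped_limit) return "length";
  return "unknown";
}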

package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts

@@ -58,16 +58,16 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
  countPromptTokens(prompt: LlamaCppTextGenerationPrompt): Promise<number>;
  doGenerateTexts(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<{
  response: {
+ stop: true;
  model: string;
  prompt: string;
  content: string;
- stop: true;
  generation_settings: {
+ stop: string[];
  model: string;
  stream: boolean;
  seed: number;
  mirostat: number;
- stop: string[];
  frequency_penalty: number;
  ignore_eos: boolean;
  logit_bias: number[];
@@ -106,7 +106,10 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
  tokens_predicted: number;
  truncated: boolean;
  };
- texts: string[];
+ textGenerationResults: {
+ text: string;
+ finishReason: "length" | "stop" | "unknown";
+ }[];
  usage: {
  promptTokens: number;
  completionTokens: number;
@@ -153,11 +156,11 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  top_p: z.ZodNumber;
  typical_p: z.ZodNumber;
  }, "strip", z.ZodTypeAny, {
+ stop: string[];
  model: string;
  stream: boolean;
  seed: number;
  mirostat: number;
- stop: string[];
  frequency_penalty: number;
  ignore_eos: boolean;
  logit_bias: number[];
@@ -177,11 +180,11 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  top_p: number;
  typical_p: number;
  }, {
+ stop: string[];
  model: string;
  stream: boolean;
  seed: number;
  mirostat: number;
- stop: string[];
  frequency_penalty: number;
  ignore_eos: boolean;
  logit_bias: number[];
@@ -240,16 +243,16 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  tokens_predicted: z.ZodNumber;
  truncated: z.ZodBoolean;
  }, "strip", z.ZodTypeAny, {
+ stop: true;
  model: string;
  prompt: string;
  content: string;
- stop: true;
  generation_settings: {
+ stop: string[];
  model: string;
  stream: boolean;
  seed: number;
  mirostat: number;
- stop: string[];
  frequency_penalty: number;
  ignore_eos: boolean;
  logit_bias: number[];
@@ -288,16 +291,16 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  tokens_predicted: number;
  truncated: boolean;
  }, {
+ stop: true;
  model: string;
  prompt: string;
  content: string;
- stop: true;
  generation_settings: {
+ stop: string[];
  model: string;
  stream: boolean;
  seed: number;
  mirostat: number;
- stop: string[];
  frequency_penalty: number;
  ignore_eos: boolean;
  logit_bias: number[];
@@ -353,16 +356,16 @@ export declare const LlamaCppTextGenerationResponseFormat: {
  json: {
  stream: false;
  handler: ResponseHandler<{
+ stop: true;
  model: string;
  prompt: string;
  content: string;
- stop: true;
  generation_settings: {
+ stop: string[];
  model: string;
  stream: boolean;
  seed: number;
  mirostat: number;
- stop: string[];
  frequency_penalty: number;
  ignore_eos: boolean;
  logit_bias: number[];

package/model-provider/llamacpp/LlamaCppTextGenerationModel.js

@@ -4,6 +4,7 @@ import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postTo
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
  import { parseJSON } from "../../core/schema/parseJSON.js";
  import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
@@ -34,25 +35,52 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
  return this.settings.contextWindowSize;
  }
  async callAPI(prompt, options) {
+ const api = this.settings.api ?? new LlamaCppApiConfiguration();
+ const responseFormat = options.responseFormat;
+ const abortSignal = options.run?.abortSignal;
  return callWithRetryAndThrottle({
- retry: this.settings.api?.retry,
- throttle: this.settings.api?.throttle,
- call: async () => callLlamaCppTextGenerationAPI({
- ...this.settings,
- // mapping
- nPredict: this.settings.maxGenerationTokens,
- stop: this.settings.stopSequences,
- // other
- abortSignal: options.run?.abortSignal,
- prompt,
- responseFormat: options.responseFormat,
+ retry: api.retry,
+ throttle: api.throttle,
+ call: async () => postJsonToApi({
+ url: api.assembleUrl(`/completion`),
+ headers: api.headers,
+ body: {
+ stream: responseFormat.stream,
+ prompt: prompt.text,
+ image_data: prompt.images != null
+ ? Object.entries(prompt.images).map(([id, data]) => ({
+ id: +id,
+ data,
+ }))
+ : undefined,
+ cache_prompt: this.settings.cachePrompt,
+ temperature: this.settings.temperature,
+ top_k: this.settings.topK,
+ top_p: this.settings.topP,
+ n_predict: this.settings.maxGenerationTokens,
+ n_keep: this.settings.nKeep,
+ stop: this.settings.stopSequences,
+ tfs_z: this.settings.tfsZ,
+ typical_p: this.settings.typicalP,
+ repeat_penalty: this.settings.repeatPenalty,
+ repeat_last_n: this.settings.repeatLastN,
+ penalize_nl: this.settings.penalizeNl,
+ mirostat: this.settings.mirostat,
+ mirostat_tau: this.settings.mirostatTau,
+ mirostat_eta: this.settings.mirostatEta,
+ seed: this.settings.seed,
+ ignore_eos: this.settings.ignoreEos,
+ logit_bias: this.settings.logitBias,
+ },
+ failedResponseHandler: failedLlamaCppCallResponseHandler,
+ successfulResponseHandler: responseFormat.handler,
+ abortSignal,
  }),
  });
  }
  get settingsForEvent() {
  const eventSettingProperties = [
- "maxGenerationTokens",
- "stopSequences",
+ ...textGenerationModelProperties,
  "contextWindowSize",
  "cachePrompt",
  "temperature",
@@ -84,7 +112,16 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
  });
  return {
  response,
- texts: [response.content],
+ textGenerationResults: [
+ {
+ text: response.content,
+ finishReason: response.stopped_eos || response.stopped_word
+ ? "stop"
+ : response.stopped_limit
+ ? "length"
+ : "unknown",
+ },
+ ],
  usage: {
  promptTokens: response.tokens_evaluated,
  completionTokens: response.tokens_predicted,
@@ -194,43 +231,6 @@ const llamaCppTextStreamingResponseSchema = new ZodSchema(z.discriminatedUnion("
  }),
  llamaCppTextGenerationResponseSchema,
  ]));
- async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguration(), abortSignal, responseFormat, prompt, cachePrompt, temperature, topK, topP, nPredict, nKeep, stop, tfsZ, typicalP, repeatPenalty, repeatLastN, penalizeNl, mirostat, mirostatTau, mirostatEta, seed, ignoreEos, logitBias, }) {
- return postJsonToApi({
- url: api.assembleUrl(`/completion`),
- headers: api.headers,
- body: {
- stream: responseFormat.stream,
- prompt: prompt.text,
- cache_prompt: cachePrompt,
- temperature,
- top_k: topK,
- top_p: topP,
- n_predict: nPredict,
- n_keep: nKeep,
- stop,
- tfs_z: tfsZ,
- typical_p: typicalP,
- repeat_penalty: repeatPenalty,
- repeat_last_n: repeatLastN,
- penalize_nl: penalizeNl,
- mirostat,
- mirostat_tau: mirostatTau,
- mirostat_eta: mirostatEta,
- seed,
- ignore_eos: ignoreEos,
- logit_bias: logitBias,
- image_data: prompt.images != null
- ? Object.entries(prompt.images).map(([id, data]) => ({
- id: +id,
- data,
- }))
- : undefined,
- },
- failedResponseHandler: failedLlamaCppCallResponseHandler,
- successfulResponseHandler: responseFormat.handler,
- abortSignal,
- });
- }
  async function createLlamaCppFullDeltaIterableQueue(stream) {
  const queue = new AsyncQueue();
  let content = "";

package/model-provider/mistral/MistralChatModel.cjs

@@ -75,6 +75,9 @@ class MistralChatModel extends AbstractModel_js_1.AbstractModel {
  get settingsForEvent() {
  const eventSettingProperties = [
  "maxGenerationTokens",
+ "stopSequences",
+ "numberOfGenerations",
+ "trimWhitespace",
  "temperature",
  "topP",
  "safeMode",
@@ -89,9 +92,23 @@ class MistralChatModel extends AbstractModel_js_1.AbstractModel {
  });
  return {
  response,
- texts: response.choices.map((choice) => choice.message.content),
+ textGenerationResults: response.choices.map((choice) => ({
+ text: choice.message.content,
+ finishReason: this.translateFinishReason(choice.finish_reason),
+ })),
  };
  }
+ translateFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ return "stop";
+ case "length":
+ case "model_length":
+ return "length";
+ default:
+ return "unknown";
+ }
+ }
  doStreamText(prompt, options) {
  return this.callAPI(prompt, {
  ...options,
@@ -118,7 +135,7 @@ class MistralChatModel extends AbstractModel_js_1.AbstractModel {
  }
  withPromptTemplate(promptTemplate) {
  return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
- model: this,
+ model: this, // stop tokens are not supported by this model
  promptTemplate,
  });
  }

package/model-provider/mistral/MistralChatModel.d.ts

@@ -7,6 +7,7 @@ import { Delta } from "../../model-function/Delta.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
  import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+ import { TextGenerationFinishReason } from "../../model-function/generate-text/TextGenerationResult.js";
  export type MistralChatMessage = {
  role: "system" | "user" | "assistant";
  content: string;
@@ -78,8 +79,12 @@ export declare class MistralChatModel extends AbstractModel<MistralChatModelSett
  index: number;
  }[];
  };
- texts: string[];
+ textGenerationResults: {
+ text: string;
+ finishReason: TextGenerationFinishReason;
+ }[];
  }>;
+ private translateFinishReason;
  doStreamText(prompt: MistralChatPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  /**
  * Returns this model with a text prompt template.
@@ -88,11 +93,11 @@ export declare class MistralChatModel extends AbstractModel<MistralChatModelSett
  /**
  * Returns this model with an instruction prompt template.
  */
- withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextInstructionPrompt, MistralChatPrompt, MistralChatModelSettings, this>;
+ withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, MistralChatPrompt, MistralChatModelSettings, this>;
  /**
  * Returns this model with a chat prompt template.
  */
- withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextChatPrompt, MistralChatPrompt, MistralChatModelSettings, this>;
+ withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, MistralChatPrompt, MistralChatModelSettings, this>;
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, MistralChatPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, MistralChatPrompt, MistralChatModelSettings, this>;
  withSettings(additionalSettings: Partial<MistralChatModelSettings>): this;
  }

package/model-provider/mistral/MistralChatModel.js

@@ -72,6 +72,9 @@ export class MistralChatModel extends AbstractModel {
  get settingsForEvent() {
  const eventSettingProperties = [
  "maxGenerationTokens",
+ "stopSequences",
+ "numberOfGenerations",
+ "trimWhitespace",
  "temperature",
  "topP",
  "safeMode",
@@ -86,9 +89,23 @@ export class MistralChatModel extends AbstractModel {
  });
  return {
  response,
- texts: response.choices.map((choice) => choice.message.content),
+ textGenerationResults: response.choices.map((choice) => ({
+ text: choice.message.content,
+ finishReason: this.translateFinishReason(choice.finish_reason),
+ })),
  };
  }
+ translateFinishReason(finishReason) {
+ switch (finishReason) {
+ case "stop":
+ return "stop";
+ case "length":
+ case "model_length":
+ return "length";
+ default:
+ return "unknown";
+ }
+ }
  doStreamText(prompt, options) {
  return this.callAPI(prompt, {
  ...options,
@@ -115,7 +132,7 @@ export class MistralChatModel extends AbstractModel {
  }
  withPromptTemplate(promptTemplate) {
  return new PromptTemplateTextStreamingModel({
- model: this,
+ model: this, // stop tokens are not supported by this model
  promptTemplate,
  });
  }
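
The Mistral hunks add a private translateFinishReason that folds the API's model_length case into length. An equivalent standalone sketch; the full TextGenerationFinishReason union is assumed, since TextGenerationResult.d.ts is listed in this diff but its body is not shown here:

// Assumed shape of the shared TextGenerationFinishReason union.
type TextGenerationFinishReason =
  | "stop" | "length" | "content-filter" | "tool-calls" | "error" | "other" | "unknown";

// Mirrors MistralChatModel#translateFinishReason from the hunks above.
function translateMistralFinishReason(
  finishReason: string | null | undefined
): TextGenerationFinishReason {
  switch (finishReason) {
    case "stop":
      return "stop";
    case "length":
    case "model_length": // Mistral-specific: request exceeded the model context length
      return "length";
    default:
      return "unknown";
  }
}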

package/model-provider/mistral/MistralPromptTemplate.cjs

@@ -1,7 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.chat = exports.instruction = exports.text = void 0;
- const ChatPrompt_js_1 = require("../../model-function/generate-text/prompt-template/ChatPrompt.cjs");
+ const Content_js_1 = require("../../model-function/generate-text/prompt-template/Content.cjs");
  /**
  * Formats a text prompt as a Mistral prompt.
  */
@@ -22,7 +22,8 @@ function instruction() {
  if (prompt.system != null) {
  messages.push({ role: "system", content: prompt.system });
  }
- messages.push({ role: "user", content: prompt.instruction });
+ const instruction = (0, Content_js_1.validateContentIsString)(prompt.instruction, prompt);
+ messages.push({ role: "user", content: instruction });
  return messages;
  },
  stopSequences: [],
@@ -35,7 +36,6 @@ exports.instruction = instruction;
  function chat() {
  return {
  format(prompt) {
- (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
  const messages = [];
  if (prompt.system != null) {
  messages.push({ role: "system", content: prompt.system });
@@ -43,7 +43,8 @@ function chat() {
  for (const { role, content } of prompt.messages) {
  switch (role) {
  case "user": {
- messages.push({ role: "user", content });
+ const textContent = (0, Content_js_1.validateContentIsString)(content, prompt);
+ messages.push({ role: "user", content: textContent });
  break;
  }
  case "assistant": {

package/model-provider/mistral/MistralPromptTemplate.d.ts

@@ -1,6 +1,6 @@
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
- import { TextChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
- import { TextInstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
+ import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+ import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
  import { MistralChatPrompt } from "./MistralChatModel.js";
  /**
  * Formats a text prompt as a Mistral prompt.
@@ -9,8 +9,8 @@ export declare function text(): TextGenerationPromptTemplate<string, MistralChat
  /**
  * Formats an instruction prompt as a Mistral prompt.
  */
- export declare function instruction(): TextGenerationPromptTemplate<TextInstructionPrompt, MistralChatPrompt>;
+ export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, MistralChatPrompt>;
  /**
  * Formats a chat prompt as a Mistral prompt.
  */
- export declare function chat(): TextGenerationPromptTemplate<TextChatPrompt, MistralChatPrompt>;
+ export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, MistralChatPrompt>;

package/model-provider/mistral/MistralPromptTemplate.js

@@ -1,4 +1,4 @@
- import { validateChatPrompt, } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+ import { validateContentIsString } from "../../model-function/generate-text/prompt-template/Content.js";
  /**
  * Formats a text prompt as a Mistral prompt.
  */
@@ -18,7 +18,8 @@ export function instruction() {
  if (prompt.system != null) {
  messages.push({ role: "system", content: prompt.system });
  }
- messages.push({ role: "user", content: prompt.instruction });
+ const instruction = validateContentIsString(prompt.instruction, prompt);
+ messages.push({ role: "user", content: instruction });
  return messages;
  },
  stopSequences: [],
@@ -30,7 +31,6 @@ export function instruction() {
  export function chat() {
  return {
  format(prompt) {
- validateChatPrompt(prompt);
  const messages = [];
  if (prompt.system != null) {
  messages.push({ role: "system", content: prompt.system });
@@ -38,7 +38,8 @@ export function chat() {
  for (const { role, content } of prompt.messages) {
  switch (role) {
  case "user": {
- messages.push({ role: "user", content });
+ const textContent = validateContentIsString(content, prompt);
+ messages.push({ role: "user", content: textContent });
  break;
  }
  case "assistant": {
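
Both Mistral prompt templates now pass user content through validateContentIsString before building messages, because the merged InstructionPrompt/ChatPrompt types allow multi-modal content that a text-only chat API cannot accept. A sketch of what that helper presumably does; the content part shapes are assumptions, and the real implementation in Content.ts presumably throws the library's InvalidPromptError rather than a plain Error:

// Assumed content shape after the prompt-type merge: plain text or multi-modal parts.
type TextPart = { type: "text"; text: string };
type ImagePart = { type: "image"; base64Image: string; mimeType?: string };
type Content = string | Array<TextPart | ImagePart>;

// Hypothetical reconstruction of validateContentIsString: pass strings through,
// reject multi-modal content for text-only providers such as Mistral.
function validateContentIsString(content: Content, prompt: unknown): string {
  if (typeof content === "string") {
    return content;
  }
  throw new Error(
    `Invalid prompt: only string content is supported by this prompt template. Prompt: ${JSON.stringify(prompt)}`
  );
}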

package/model-provider/ollama/OllamaChatModel.cjs

@@ -9,6 +9,7 @@ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
  const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
  const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+ const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
  const TextGenerationToolCallModel_js_1 = require("../../tool/generate-tool-call/TextGenerationToolCallModel.cjs");
  const TextGenerationToolCallsOrGenerateTextModel_js_1 = require("../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs");
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
@@ -92,8 +93,7 @@ class OllamaChatModel extends AbstractModel_js_1.AbstractModel {
  }
  get settingsForEvent() {
  const eventSettingProperties = [
- "maxGenerationTokens",
- "stopSequences",
+ ...TextGenerationModel_js_1.textGenerationModelProperties,
  "temperature",
  "mirostat",
  "mirostatEta",
@@ -119,7 +119,12 @@ class OllamaChatModel extends AbstractModel_js_1.AbstractModel {
  });
  return {
  response,
- texts: [response.message.content],
+ textGenerationResults: [
+ {
+ text: response.message.content,
+ finishReason: "unknown",
+ },
+ ],
  };
  }
  doStreamText(prompt, options) {
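
The llama.cpp and Ollama settingsForEvent hunks replace the hard-coded "maxGenerationTokens"/"stopSequences" entries with a spread of the new textGenerationModelProperties export. A sketch of what that shared list presumably contains, mirroring the four base properties that MistralChatModel now spells out explicitly (the exact contents of TextGenerationModel.cjs/.js are not shown in this diff):

// Assumed contents of the new shared export in TextGenerationModel.ts.
const textGenerationModelProperties = [
  "maxGenerationTokens",
  "stopSequences",
  "numberOfGenerations",
  "trimWhitespace",
] as const;

// Providers can then spread the shared list instead of repeating the base names:
const eventSettingProperties: readonly string[] = [
  ...textGenerationModelProperties,
  "temperature",
  "mirostat",
  "mirostatEta",
];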

package/model-provider/ollama/OllamaChatModel.d.ts

@@ -52,7 +52,10 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
  load_duration?: number | undefined;
  prompt_eval_duration?: number | undefined;
  };
- texts: string[];
+ textGenerationResults: {
+ text: string;
+ finishReason: "unknown";
+ }[];
  }>;
  doStreamText(prompt: OllamaChatPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, OllamaChatPrompt>): TextGenerationToolCallModel<INPUT_PROMPT, OllamaChatPrompt, this>;
@@ -64,11 +67,11 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
  /**
  * Returns this model with an instruction prompt template.
  */
- withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").MultiModalInstructionPrompt | import("../../index.js").TextInstructionPrompt, OllamaChatPrompt, OllamaChatModelSettings, this>;
+ withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, OllamaChatPrompt, OllamaChatModelSettings, this>;
  /**
  * Returns this model with a chat prompt template.
  */
- withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextChatPrompt | import("../../index.js").MultiModalChatPrompt, OllamaChatPrompt, OllamaChatModelSettings, this>;
+ withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, OllamaChatPrompt, OllamaChatModelSettings, this>;
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OllamaChatPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, OllamaChatPrompt, OllamaChatModelSettings, this>;
  withSettings(additionalSettings: Partial<OllamaChatModelSettings>): this;
  }