modelfusion 0.98.0 → 0.100.0

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (93)
  1. package/README.md +13 -19
  2. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs +1 -1
  3. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js +1 -1
  4. package/guard/fixStructure.cjs +3 -3
  5. package/guard/fixStructure.d.ts +3 -3
  6. package/guard/fixStructure.js +3 -3
  7. package/model-function/Model.d.ts +2 -2
  8. package/model-function/generate-structure/generateStructure.d.ts +2 -2
  9. package/model-function/generate-structure/streamStructure.d.ts +1 -1
  10. package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +2 -2
  11. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +2 -2
  12. package/model-function/generate-text/PromptTemplateTextGenerationModel.js +2 -2
  13. package/model-function/generate-text/TextGenerationModel.d.ts +31 -5
  14. package/model-function/generate-text/generateText.cjs +10 -4
  15. package/model-function/generate-text/generateText.d.ts +1 -0
  16. package/model-function/generate-text/generateText.js +10 -4
  17. package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +1 -1
  18. package/model-function/generate-text/prompt-template/trimChatPrompt.js +1 -1
  19. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +27 -31
  20. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +2 -2
  21. package/model-provider/anthropic/AnthropicTextGenerationModel.js +27 -31
  22. package/model-provider/cohere/CohereFacade.cjs +1 -1
  23. package/model-provider/cohere/CohereFacade.d.ts +1 -1
  24. package/model-provider/cohere/CohereFacade.js +1 -1
  25. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
  26. package/model-provider/cohere/CohereTextGenerationModel.cjs +34 -43
  27. package/model-provider/cohere/CohereTextGenerationModel.d.ts +3 -4
  28. package/model-provider/cohere/CohereTextGenerationModel.js +34 -43
  29. package/model-provider/huggingface/HuggingFaceFacade.cjs +1 -1
  30. package/model-provider/huggingface/HuggingFaceFacade.d.ts +1 -1
  31. package/model-provider/huggingface/HuggingFaceFacade.js +1 -1
  32. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +31 -41
  33. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +3 -4
  34. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +31 -41
  35. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +4 -4
  36. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +2 -2
  37. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +4 -4
  38. package/model-provider/mistral/{MistralTextGenerationModel.cjs → MistralChatModel.cjs} +18 -18
  39. package/model-provider/mistral/{MistralTextGenerationModel.d.ts → MistralChatModel.d.ts} +22 -21
  40. package/model-provider/mistral/{MistralTextGenerationModel.js → MistralChatModel.js} +16 -16
  41. package/model-provider/mistral/MistralFacade.cjs +5 -5
  42. package/model-provider/mistral/MistralFacade.d.ts +3 -2
  43. package/model-provider/mistral/MistralFacade.js +3 -3
  44. package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
  45. package/model-provider/mistral/index.cjs +1 -1
  46. package/model-provider/mistral/index.d.ts +1 -1
  47. package/model-provider/mistral/index.js +1 -1
  48. package/model-provider/ollama/OllamaApiConfiguration.d.ts +6 -5
  49. package/model-provider/ollama/OllamaChatModel.cjs +303 -0
  50. package/model-provider/ollama/OllamaChatModel.d.ts +171 -0
  51. package/model-provider/ollama/OllamaChatModel.js +299 -0
  52. package/model-provider/ollama/OllamaChatPromptTemplate.cjs +76 -0
  53. package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +20 -0
  54. package/model-provider/ollama/OllamaChatPromptTemplate.js +69 -0
  55. package/model-provider/ollama/{OllamaTextGenerationModel.cjs → OllamaCompletionModel.cjs} +17 -15
  56. package/model-provider/ollama/OllamaCompletionModel.d.ts +159 -0
  57. package/model-provider/ollama/{OllamaTextGenerationModel.js → OllamaCompletionModel.js} +15 -13
  58. package/model-provider/ollama/{OllamaTextGenerationModel.test.cjs → OllamaCompletionModel.test.cjs} +3 -3
  59. package/model-provider/ollama/{OllamaTextGenerationModel.test.js → OllamaCompletionModel.test.js} +3 -3
  60. package/model-provider/ollama/OllamaFacade.cjs +15 -5
  61. package/model-provider/ollama/OllamaFacade.d.ts +7 -2
  62. package/model-provider/ollama/OllamaFacade.js +11 -3
  63. package/model-provider/ollama/OllamaTextGenerationSettings.cjs +2 -0
  64. package/model-provider/ollama/OllamaTextGenerationSettings.d.ts +87 -0
  65. package/model-provider/ollama/OllamaTextGenerationSettings.js +1 -0
  66. package/model-provider/ollama/index.cjs +4 -1
  67. package/model-provider/ollama/index.d.ts +4 -1
  68. package/model-provider/ollama/index.js +4 -1
  69. package/model-provider/openai/OpenAICompletionModel.cjs +48 -53
  70. package/model-provider/openai/OpenAICompletionModel.d.ts +3 -6
  71. package/model-provider/openai/OpenAICompletionModel.js +48 -53
  72. package/model-provider/openai/OpenAIFacade.cjs +6 -4
  73. package/model-provider/openai/OpenAIFacade.d.ts +5 -3
  74. package/model-provider/openai/OpenAIFacade.js +4 -3
  75. package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +50 -54
  76. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +7 -28
  77. package/model-provider/openai/chat/AbstractOpenAIChatModel.js +50 -54
  78. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +3 -3
  79. package/model-provider/openai/chat/OpenAIChatModel.cjs +4 -4
  80. package/model-provider/openai/chat/OpenAIChatModel.d.ts +3 -3
  81. package/model-provider/openai/chat/OpenAIChatModel.js +4 -4
  82. package/model-provider/openai/chat/OpenAIChatModel.test.cjs +1 -1
  83. package/model-provider/openai/chat/OpenAIChatModel.test.js +1 -1
  84. package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +5 -5
  85. package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +1 -1
  86. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +2 -2
  87. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +2 -2
  88. package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +2 -2
  89. package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +2 -2
  90. package/model-provider/openai-compatible/OpenAICompatibleFacade.js +2 -2
  91. package/package.json +1 -1
  92. package/model-provider/ollama/OllamaTextGenerationModel.d.ts +0 -230
  93. package/model-provider/ollama/{OllamaTextGenerationModel.test.d.ts → OllamaCompletionModel.test.d.ts} +0 -0
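The headline changes in this release are renames: `MistralTextGenerationModel` becomes `MistralChatModel`, `OllamaTextGenerationModel` becomes `OllamaCompletionModel` (with a new `OllamaChatModel` added alongside), and the setting `maxCompletionTokens` becomes `maxGenerationTokens`. A minimal import-migration sketch, assuming these classes are re-exported from the package root as the provider index files below suggest:

```ts
// 0.98.0 (old names):
// import { MistralTextGenerationModel, OllamaTextGenerationModel } from "modelfusion";

// 0.100.0 (renamed; OllamaChatModel is new in this release):
import {
  MistralChatModel,
  OllamaCompletionModel,
  OllamaChatModel,
} from "modelfusion";
```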
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js
@@ -40,7 +40,7 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
             call: async () => callLlamaCppTextGenerationAPI({
                 ...this.settings,
                 // mapping
-                nPredict: this.settings.maxCompletionTokens,
+                nPredict: this.settings.maxGenerationTokens,
                 stop: this.settings.stopSequences,
                 // other
                 abortSignal: options.run?.abortSignal,
@@ -51,7 +51,7 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-            "maxCompletionTokens",
+            "maxGenerationTokens",
             "stopSequences",
             "contextWindowSize",
             "cachePrompt",
@@ -77,14 +77,14 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
         const tokens = await this.tokenizer.tokenize(prompt.text);
         return tokens.length;
     }
-    async doGenerateText(prompt, options) {
+    async doGenerateTexts(prompt, options) {
         const response = await this.callAPI(prompt, {
             ...options,
             responseFormat: LlamaCppTextGenerationResponseFormat.json,
         });
         return {
             response,
-            text: response.content,
+            texts: [response.content],
             usage: {
                 promptTokens: response.tokens_evaluated,
                 completionTokens: response.tokens_predicted,
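Two changes are visible in these hunks: the setting rename (the `nPredict` mapping now reads from `maxGenerationTokens`) and the switch from `doGenerateText`, which returned a single `text`, to `doGenerateTexts`, which returns a `texts` array. A sketch of the new result shape, using only names that appear in this diff (the `{ text }` prompt shape follows the `tokenizer.tokenize(prompt.text)` context line above; root re-export of the class is an assumption):

```ts
import { LlamaCppTextGenerationModel } from "modelfusion";

const model = new LlamaCppTextGenerationModel({
  maxGenerationTokens: 256, // renamed from maxCompletionTokens in 0.98.0
  stopSequences: ["\n\n"],
});

// doGenerateTexts returns the generated candidates as an array:
const { texts, usage } = await model.doGenerateTexts({ text: "Write a haiku." });
console.log(texts[0], usage.completionTokens); // previously: result.text
```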
package/model-provider/mistral/{MistralTextGenerationModel.cjs → MistralChatModel.cjs}
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.MistralTextGenerationResponseFormat = exports.MistralTextGenerationModel = void 0;
+exports.MistralChatResponseFormat = exports.MistralChatModel = void 0;
 const zod_1 = require("zod");
 const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
@@ -13,7 +13,7 @@ const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
 const MistralApiConfiguration_js_1 = require("./MistralApiConfiguration.cjs");
 const MistralError_js_1 = require("./MistralError.cjs");
 const MistralPromptTemplate_js_1 = require("./MistralPromptTemplate.cjs");
-class MistralTextGenerationModel extends AbstractModel_js_1.AbstractModel {
+class MistralChatModel extends AbstractModel_js_1.AbstractModel {
     constructor(settings) {
         super({ settings });
         Object.defineProperty(this, "provider", {
@@ -45,7 +45,7 @@ class MistralTextGenerationModel extends AbstractModel_js_1.AbstractModel {
         return this.settings.model;
     }
     async callAPI(prompt, options) {
-        const { model, temperature, topP, safeMode, randomSeed, maxCompletionTokens, } = this.settings;
+        const { model, temperature, topP, safeMode, randomSeed, maxGenerationTokens, } = this.settings;
         const api = this.settings.api ?? new MistralApiConfiguration_js_1.MistralApiConfiguration();
         const abortSignal = options.run?.abortSignal;
         const stream = options.responseFormat.stream;
@@ -62,7 +62,7 @@
                 model,
                 temperature,
                 top_p: topP,
-                max_tokens: maxCompletionTokens,
+                max_tokens: maxGenerationTokens,
                 safe_mode: safeMode,
                 random_seed: randomSeed,
             },
@@ -74,7 +74,7 @@
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-            "maxCompletionTokens",
+            "maxGenerationTokens",
             "temperature",
             "topP",
             "safeMode",
@@ -82,20 +82,20 @@
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
-    async doGenerateText(prompt, options) {
+    async doGenerateTexts(prompt, options) {
         const response = await this.callAPI(prompt, {
             ...options,
-            responseFormat: exports.MistralTextGenerationResponseFormat.json,
+            responseFormat: exports.MistralChatResponseFormat.json,
         });
         return {
             response,
-            text: response.choices[0].message.content,
+            texts: response.choices.map((choice) => choice.message.content),
         };
     }
     doStreamText(prompt, options) {
         return this.callAPI(prompt, {
             ...options,
-            responseFormat: exports.MistralTextGenerationResponseFormat.textDeltaIterable,
+            responseFormat: exports.MistralChatResponseFormat.textDeltaIterable,
         });
     }
     /**
@@ -123,11 +123,11 @@
         });
     }
     withSettings(additionalSettings) {
-        return new MistralTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
+        return new MistralChatModel(Object.assign({}, this.settings, additionalSettings));
     }
 }
-exports.MistralTextGenerationModel = MistralTextGenerationModel;
-const mistralTextGenerationResponseSchema = zod_1.z.object({
+exports.MistralChatModel = MistralChatModel;
+const mistralChatResponseSchema = zod_1.z.object({
     id: zod_1.z.string(),
     object: zod_1.z.string(),
     created: zod_1.z.number(),
@@ -146,23 +146,23 @@ const mistralTextGenerationResponseSchema = zod_1.z.object({
         total_tokens: zod_1.z.number(),
     }),
 });
-exports.MistralTextGenerationResponseFormat = {
+exports.MistralChatResponseFormat = {
     /**
      * Returns the response as a JSON object.
      */
     json: {
         stream: false,
-        handler: (0, postToApi_js_1.createJsonResponseHandler)(mistralTextGenerationResponseSchema),
+        handler: (0, postToApi_js_1.createJsonResponseHandler)(mistralChatResponseSchema),
     },
     /**
      * Returns an async iterable over the text deltas (only the text delta of the first choice).
      */
     textDeltaIterable: {
         stream: true,
-        handler: async ({ response }) => createMistralTextGenerationDeltaIterableQueue(response.body, (delta) => delta[0]?.delta.content ?? ""),
+        handler: async ({ response }) => createMistralChatDeltaIterableQueue(response.body, (delta) => delta[0]?.delta.content ?? ""),
     },
 };
-const mistralTextGenerationChunkSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
+const mistralChatChunkSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
     id: zod_1.z.string(),
     object: zod_1.z.string().optional(),
     created: zod_1.z.number().optional(),
@@ -179,7 +179,7 @@ const mistralTextGenerationChunkSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
             .optional(),
     })),
 }));
-async function createMistralTextGenerationDeltaIterableQueue(stream, extractDeltaValue) {
+async function createMistralChatDeltaIterableQueue(stream, extractDeltaValue) {
     const queue = new AsyncQueue_js_1.AsyncQueue();
     const streamDelta = [];
     // process the stream asynchronously (no 'await' on purpose):
@@ -194,7 +194,7 @@ async function createMistralTextGenerationDeltaIterableQueue(stream, extractDeltaValue) {
         }
         const parseResult = (0, parseJSON_js_1.safeParseJSON)({
             text: data,
-            schema: mistralTextGenerationChunkSchema,
+            schema: mistralChatChunkSchema,
         });
         if (!parseResult.success) {
             queue.push({
package/model-provider/mistral/{MistralTextGenerationModel.d.ts → MistralChatModel.d.ts}
@@ -7,11 +7,12 @@ import { Delta } from "../../model-function/Delta.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
-export type MistralTextGenerationPrompt = Array<{
+export type MistralChatMessage = {
     role: "system" | "user" | "assistant";
     content: string;
-}>;
-export interface MistralTextGenerationModelSettings extends TextGenerationModelSettings {
+};
+export type MistralChatPrompt = Array<MistralChatMessage>;
+export interface MistralChatModelSettings extends TextGenerationModelSettings {
     api?: ApiConfiguration;
     model: "mistral-tiny" | "mistral-small" | "mistral-medium";
     /**
@@ -46,18 +47,18 @@ export interface MistralTextGenerationModelSettings extends TextGenerationModelSettings {
      */
     randomSeed?: number | null;
 }
-export declare class MistralTextGenerationModel extends AbstractModel<MistralTextGenerationModelSettings> implements TextStreamingModel<MistralTextGenerationPrompt, MistralTextGenerationModelSettings> {
-    constructor(settings: MistralTextGenerationModelSettings);
+export declare class MistralChatModel extends AbstractModel<MistralChatModelSettings> implements TextStreamingModel<MistralChatPrompt, MistralChatModelSettings> {
+    constructor(settings: MistralChatModelSettings);
     readonly provider = "mistral";
     get modelName(): "mistral-tiny" | "mistral-small" | "mistral-medium";
     readonly contextWindowSize: undefined;
     readonly tokenizer: undefined;
     readonly countPromptTokens: undefined;
-    callAPI<RESULT>(prompt: MistralTextGenerationPrompt, options: {
-        responseFormat: MistralTextGenerationResponseFormatType<RESULT>;
+    callAPI<RESULT>(prompt: MistralChatPrompt, options: {
+        responseFormat: MistralChatResponseFormatType<RESULT>;
     } & FunctionOptions): Promise<RESULT>;
-    get settingsForEvent(): Partial<MistralTextGenerationModelSettings>;
-    doGenerateText(prompt: MistralTextGenerationPrompt, options?: FunctionOptions): Promise<{
+    get settingsForEvent(): Partial<MistralChatModelSettings>;
+    doGenerateTexts(prompt: MistralChatPrompt, options?: FunctionOptions): Promise<{
         response: {
             object: string;
             usage: {
@@ -77,25 +78,25 @@ export declare class MistralTextGenerationModel extends AbstractModel<MistralTextGenerationModelSettings> implements TextStreamingModel<MistralTextGenerationPrompt, MistralTextGenerationModelSettings> {
                 index: number;
             }[];
         };
-        text: string;
+        texts: string[];
     }>;
-    doStreamText(prompt: MistralTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+    doStreamText(prompt: MistralChatPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
     /**
      * Returns this model with a text prompt template.
      */
-    withTextPrompt(): PromptTemplateTextStreamingModel<string, MistralTextGenerationPrompt, MistralTextGenerationModelSettings, this>;
+    withTextPrompt(): PromptTemplateTextStreamingModel<string, MistralChatPrompt, MistralChatModelSettings, this>;
     /**
      * Returns this model with an instruction prompt template.
      */
-    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextInstructionPrompt, MistralTextGenerationPrompt, MistralTextGenerationModelSettings, this>;
+    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextInstructionPrompt, MistralChatPrompt, MistralChatModelSettings, this>;
     /**
      * Returns this model with a chat prompt template.
     */
-    withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextChatPrompt, MistralTextGenerationPrompt, MistralTextGenerationModelSettings, this>;
-    withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, MistralTextGenerationPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, MistralTextGenerationPrompt, MistralTextGenerationModelSettings, this>;
-    withSettings(additionalSettings: Partial<MistralTextGenerationModelSettings>): this;
+    withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextChatPrompt, MistralChatPrompt, MistralChatModelSettings, this>;
+    withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, MistralChatPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, MistralChatPrompt, MistralChatModelSettings, this>;
+    withSettings(additionalSettings: Partial<MistralChatModelSettings>): this;
 }
-declare const mistralTextGenerationResponseSchema: z.ZodObject<{
+declare const mistralChatResponseSchema: z.ZodObject<{
     id: z.ZodString;
     object: z.ZodString;
     created: z.ZodNumber;
@@ -178,12 +179,12 @@ declare const mistralTextGenerationResponseSchema: z.ZodObject<{
         index: number;
     }[];
 }>;
-export type MistralTextGenerationResponse = z.infer<typeof mistralTextGenerationResponseSchema>;
-export type MistralTextGenerationResponseFormatType<T> = {
+export type MistralChatResponse = z.infer<typeof mistralChatResponseSchema>;
+export type MistralChatResponseFormatType<T> = {
     stream: boolean;
     handler: ResponseHandler<T>;
 };
-export declare const MistralTextGenerationResponseFormat: {
+export declare const MistralChatResponseFormat: {
     /**
      * Returns the response as a JSON object.
      */
@@ -219,7 +220,7 @@ export declare const MistralTextGenerationResponseFormat: {
         }) => Promise<AsyncIterable<Delta<string>>>;
     };
 };
-export type MistralTextGenerationDelta = Array<{
+export type MistralChatDelta = Array<{
     role: "assistant" | "user" | undefined;
     content: string;
     isComplete: boolean;
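Per the new declarations, a prompt is now a plain array of `MistralChatMessage` objects, and `doGenerateTexts` surfaces every returned choice rather than only the first. A sketch against the declarations above (assuming both names are re-exported from the package root, as the provider index files below suggest):

```ts
import { MistralChatModel, MistralChatPrompt } from "modelfusion";

const model = new MistralChatModel({
  model: "mistral-small",
  maxGenerationTokens: 120,
});

const prompt: MistralChatPrompt = [
  { role: "system", content: "Answer tersely." },
  { role: "user", content: "What does safe_mode do?" },
];

// One entry in texts per returned choice:
const { texts } = await model.doGenerateTexts(prompt);
```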
package/model-provider/mistral/{MistralTextGenerationModel.js → MistralChatModel.js}
@@ -10,7 +10,7 @@ import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
 import { MistralApiConfiguration } from "./MistralApiConfiguration.js";
 import { failedMistralCallResponseHandler } from "./MistralError.js";
 import { chat, instruction, text } from "./MistralPromptTemplate.js";
-export class MistralTextGenerationModel extends AbstractModel {
+export class MistralChatModel extends AbstractModel {
     constructor(settings) {
         super({ settings });
         Object.defineProperty(this, "provider", {
@@ -42,7 +42,7 @@ export class MistralTextGenerationModel extends AbstractModel {
         return this.settings.model;
     }
     async callAPI(prompt, options) {
-        const { model, temperature, topP, safeMode, randomSeed, maxCompletionTokens, } = this.settings;
+        const { model, temperature, topP, safeMode, randomSeed, maxGenerationTokens, } = this.settings;
         const api = this.settings.api ?? new MistralApiConfiguration();
         const abortSignal = options.run?.abortSignal;
         const stream = options.responseFormat.stream;
@@ -59,7 +59,7 @@
                 model,
                 temperature,
                 top_p: topP,
-                max_tokens: maxCompletionTokens,
+                max_tokens: maxGenerationTokens,
                 safe_mode: safeMode,
                 random_seed: randomSeed,
             },
@@ -71,7 +71,7 @@
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-            "maxCompletionTokens",
+            "maxGenerationTokens",
             "temperature",
             "topP",
             "safeMode",
@@ -79,20 +79,20 @@
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
-    async doGenerateText(prompt, options) {
+    async doGenerateTexts(prompt, options) {
         const response = await this.callAPI(prompt, {
             ...options,
-            responseFormat: MistralTextGenerationResponseFormat.json,
+            responseFormat: MistralChatResponseFormat.json,
         });
         return {
             response,
-            text: response.choices[0].message.content,
+            texts: response.choices.map((choice) => choice.message.content),
         };
     }
     doStreamText(prompt, options) {
         return this.callAPI(prompt, {
             ...options,
-            responseFormat: MistralTextGenerationResponseFormat.textDeltaIterable,
+            responseFormat: MistralChatResponseFormat.textDeltaIterable,
         });
     }
     /**
@@ -120,10 +120,10 @@
         });
     }
     withSettings(additionalSettings) {
-        return new MistralTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
+        return new MistralChatModel(Object.assign({}, this.settings, additionalSettings));
     }
 }
-const mistralTextGenerationResponseSchema = z.object({
+const mistralChatResponseSchema = z.object({
     id: z.string(),
     object: z.string(),
     created: z.number(),
@@ -142,23 +142,23 @@ const mistralTextGenerationResponseSchema = z.object({
         total_tokens: z.number(),
     }),
 });
-export const MistralTextGenerationResponseFormat = {
+export const MistralChatResponseFormat = {
     /**
      * Returns the response as a JSON object.
      */
     json: {
         stream: false,
-        handler: createJsonResponseHandler(mistralTextGenerationResponseSchema),
+        handler: createJsonResponseHandler(mistralChatResponseSchema),
     },
     /**
      * Returns an async iterable over the text deltas (only the text delta of the first choice).
     */
     textDeltaIterable: {
         stream: true,
-        handler: async ({ response }) => createMistralTextGenerationDeltaIterableQueue(response.body, (delta) => delta[0]?.delta.content ?? ""),
+        handler: async ({ response }) => createMistralChatDeltaIterableQueue(response.body, (delta) => delta[0]?.delta.content ?? ""),
     },
 };
-const mistralTextGenerationChunkSchema = new ZodSchema(z.object({
+const mistralChatChunkSchema = new ZodSchema(z.object({
     id: z.string(),
     object: z.string().optional(),
     created: z.number().optional(),
@@ -175,7 +175,7 @@ const mistralTextGenerationChunkSchema = new ZodSchema(z.object({
             .optional(),
     })),
 }));
-async function createMistralTextGenerationDeltaIterableQueue(stream, extractDeltaValue) {
+async function createMistralChatDeltaIterableQueue(stream, extractDeltaValue) {
     const queue = new AsyncQueue();
     const streamDelta = [];
     // process the stream asynchronously (no 'await' on purpose):
@@ -190,7 +190,7 @@ async function createMistralTextGenerationDeltaIterableQueue(stream, extractDeltaValue) {
         }
         const parseResult = safeParseJSON({
             text: data,
-            schema: mistralTextGenerationChunkSchema,
+            schema: mistralChatChunkSchema,
         });
         if (!parseResult.success) {
             queue.push({
package/model-provider/mistral/MistralFacade.cjs
@@ -1,17 +1,17 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.TextEmbedder = exports.TextGenerator = exports.Api = void 0;
+exports.TextEmbedder = exports.ChatTextGenerator = exports.Api = void 0;
 const MistralApiConfiguration_js_1 = require("./MistralApiConfiguration.cjs");
 const MistralTextEmbeddingModel_js_1 = require("./MistralTextEmbeddingModel.cjs");
-const MistralTextGenerationModel_js_1 = require("./MistralTextGenerationModel.cjs");
+const MistralChatModel_js_1 = require("./MistralChatModel.cjs");
 function Api(settings) {
     return new MistralApiConfiguration_js_1.MistralApiConfiguration(settings);
 }
 exports.Api = Api;
-function TextGenerator(settings) {
-    return new MistralTextGenerationModel_js_1.MistralTextGenerationModel(settings);
+function ChatTextGenerator(settings) {
+    return new MistralChatModel_js_1.MistralChatModel(settings);
 }
-exports.TextGenerator = TextGenerator;
+exports.ChatTextGenerator = ChatTextGenerator;
 function TextEmbedder(settings) {
     return new MistralTextEmbeddingModel_js_1.MistralTextEmbeddingModel(settings);
 }
package/model-provider/mistral/MistralFacade.d.ts
@@ -1,6 +1,7 @@
 import { MistralApiConfiguration, MistralApiConfigurationSettings } from "./MistralApiConfiguration.js";
 import { MistralTextEmbeddingModel, MistralTextEmbeddingModelSettings } from "./MistralTextEmbeddingModel.js";
-import { MistralTextGenerationModel, MistralTextGenerationModelSettings } from "./MistralTextGenerationModel.js";
+import { MistralChatModel, MistralChatModelSettings } from "./MistralChatModel.js";
 export declare function Api(settings: MistralApiConfigurationSettings): MistralApiConfiguration;
-export declare function TextGenerator(settings: MistralTextGenerationModelSettings): MistralTextGenerationModel;
+export declare function ChatTextGenerator(settings: MistralChatModelSettings): MistralChatModel;
 export declare function TextEmbedder(settings: MistralTextEmbeddingModelSettings): MistralTextEmbeddingModel;
+export { MistralChatMessage as ChatMessage, MistralChatPrompt as ChatPrompt, } from "./MistralChatModel.js";
package/model-provider/mistral/MistralFacade.js
@@ -1,11 +1,11 @@
 import { MistralApiConfiguration, } from "./MistralApiConfiguration.js";
 import { MistralTextEmbeddingModel, } from "./MistralTextEmbeddingModel.js";
-import { MistralTextGenerationModel, } from "./MistralTextGenerationModel.js";
+import { MistralChatModel, } from "./MistralChatModel.js";
 export function Api(settings) {
     return new MistralApiConfiguration(settings);
 }
-export function TextGenerator(settings) {
-    return new MistralTextGenerationModel(settings);
+export function ChatTextGenerator(settings) {
+    return new MistralChatModel(settings);
 }
 export function TextEmbedder(settings) {
     return new MistralTextEmbeddingModel(settings);
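For facade users, the rename means `mistral.TextGenerator(...)` call sites become `mistral.ChatTextGenerator(...)`; the settings object passes through unchanged. A before/after sketch (the `mistral` namespace import from the package root follows the `export * as mistral` lines in the index files below):

```ts
import { mistral } from "modelfusion";

// 0.98.0:
// const model = mistral.TextGenerator({ model: "mistral-tiny" });

// 0.100.0:
const model = mistral.ChatTextGenerator({ model: "mistral-tiny" });

// The prompt-template helpers keep their names:
const textModel = model.withTextPrompt();
```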
package/model-provider/mistral/MistralPromptTemplate.d.ts
@@ -1,16 +1,16 @@
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
 import { TextChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
 import { TextInstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
-import { MistralTextGenerationPrompt } from "./MistralTextGenerationModel.js";
+import { MistralChatPrompt } from "./MistralChatModel.js";
 /**
  * Formats a text prompt as a Mistral prompt.
  */
-export declare function text(): TextGenerationPromptTemplate<string, MistralTextGenerationPrompt>;
+export declare function text(): TextGenerationPromptTemplate<string, MistralChatPrompt>;
 /**
  * Formats an instruction prompt as a Mistral prompt.
  */
-export declare function instruction(): TextGenerationPromptTemplate<TextInstructionPrompt, MistralTextGenerationPrompt>;
+export declare function instruction(): TextGenerationPromptTemplate<TextInstructionPrompt, MistralChatPrompt>;
 /**
  * Formats a chat prompt as a Mistral prompt.
  */
-export declare function chat(): TextGenerationPromptTemplate<TextChatPrompt, MistralTextGenerationPrompt>;
+export declare function chat(): TextGenerationPromptTemplate<TextChatPrompt, MistralChatPrompt>;
package/model-provider/mistral/index.cjs
@@ -31,4 +31,4 @@ __exportStar(require("./MistralApiConfiguration.cjs"), exports);
 exports.mistral = __importStar(require("./MistralFacade.cjs"));
 exports.MistralPrompt = __importStar(require("./MistralPromptTemplate.cjs"));
 __exportStar(require("./MistralTextEmbeddingModel.cjs"), exports);
-__exportStar(require("./MistralTextGenerationModel.cjs"), exports);
+__exportStar(require("./MistralChatModel.cjs"), exports);

package/model-provider/mistral/index.d.ts
@@ -3,4 +3,4 @@ export { MistralErrorData } from "./MistralError.js";
 export * as mistral from "./MistralFacade.js";
 export * as MistralPrompt from "./MistralPromptTemplate.js";
 export * from "./MistralTextEmbeddingModel.js";
-export * from "./MistralTextGenerationModel.js";
+export * from "./MistralChatModel.js";

package/model-provider/mistral/index.js
@@ -2,4 +2,4 @@ export * from "./MistralApiConfiguration.js";
 export * as mistral from "./MistralFacade.js";
 export * as MistralPrompt from "./MistralPromptTemplate.js";
 export * from "./MistralTextEmbeddingModel.js";
-export * from "./MistralTextGenerationModel.js";
+export * from "./MistralChatModel.js";
package/model-provider/ollama/OllamaApiConfiguration.d.ts
@@ -1,10 +1,11 @@
 import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
 import { RetryFunction } from "../../core/api/RetryFunction.js";
 import { ThrottleFunction } from "../../core/api/ThrottleFunction.js";
+export type OllamaApiConfigurationSettings = {
+    baseUrl?: string;
+    retry?: RetryFunction;
+    throttle?: ThrottleFunction;
+};
 export declare class OllamaApiConfiguration extends BaseUrlApiConfiguration {
-    constructor({ baseUrl, retry, throttle, }?: {
-        baseUrl?: string;
-        retry?: RetryFunction;
-        throttle?: ThrottleFunction;
-    });
+    constructor({ baseUrl, retry, throttle, }?: OllamaApiConfigurationSettings);
 }
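The Ollama API configuration's constructor options are now a named exported type, `OllamaApiConfigurationSettings`, so settings objects can be typed independently of the constructor call. A small sketch (the base URL shown is an assumed local Ollama default, and root re-export of both names is an assumption):

```ts
import {
  OllamaApiConfiguration,
  OllamaApiConfigurationSettings,
} from "modelfusion";

const settings: OllamaApiConfigurationSettings = {
  baseUrl: "http://127.0.0.1:11434", // assumed default for a local Ollama server
};

const api = new OllamaApiConfiguration(settings);
```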