modelfusion 0.95.0 → 0.96.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/README.md +3 -3
  2. package/core/api/postToApi.cjs +30 -1
  3. package/core/api/postToApi.d.ts +7 -1
  4. package/core/api/postToApi.js +29 -1
  5. package/model-provider/index.cjs +1 -0
  6. package/model-provider/index.d.ts +1 -0
  7. package/model-provider/index.js +1 -0
  8. package/model-provider/mistral/MistralApiConfiguration.cjs +22 -0
  9. package/model-provider/mistral/MistralApiConfiguration.d.ts +12 -0
  10. package/model-provider/mistral/MistralApiConfiguration.js +18 -0
  11. package/model-provider/mistral/MistralError.cjs +17 -0
  12. package/model-provider/mistral/MistralError.d.ts +13 -0
  13. package/model-provider/mistral/MistralError.js +14 -0
  14. package/model-provider/mistral/MistralFacade.cjs +18 -0
  15. package/model-provider/mistral/MistralFacade.d.ts +6 -0
  16. package/model-provider/mistral/MistralFacade.js +12 -0
  17. package/model-provider/mistral/MistralPromptTemplate.cjs +64 -0
  18. package/model-provider/mistral/MistralPromptTemplate.d.ts +16 -0
  19. package/model-provider/mistral/MistralPromptTemplate.js +58 -0
  20. package/model-provider/mistral/MistralTextEmbeddingModel.cjs +100 -0
  21. package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +106 -0
  22. package/model-provider/mistral/MistralTextEmbeddingModel.js +96 -0
  23. package/model-provider/mistral/MistralTextGenerationModel.cjs +254 -0
  24. package/model-provider/mistral/MistralTextGenerationModel.d.ts +231 -0
  25. package/model-provider/mistral/MistralTextGenerationModel.js +250 -0
  26. package/model-provider/mistral/index.cjs +34 -0
  27. package/model-provider/mistral/index.d.ts +6 -0
  28. package/model-provider/mistral/index.js +5 -0
  29. package/model-provider/ollama/OllamaError.cjs +5 -30
  30. package/model-provider/ollama/OllamaError.js +5 -29
  31. package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +1 -7
  32. package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +0 -1
  33. package/model-provider/ollama/OllamaTextEmbeddingModel.js +1 -7
  34. package/model-provider/openai/OpenAICompletionModel.d.ts +4 -4
  35. package/model-provider/openai/OpenAIError.cjs +9 -34
  36. package/model-provider/openai/OpenAIError.d.ts +1 -3
  37. package/model-provider/openai/OpenAIError.js +9 -33
  38. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +6 -6
  39. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
  40. package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +2 -1
  41. package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +2 -1
  42. package/package.json +1 -1
package/model-provider/mistral/MistralTextEmbeddingModel.js
@@ -0,0 +1,96 @@
+ import { z } from "zod";
+ import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
+ import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
+ import { AbstractModel } from "../../model-function/AbstractModel.js";
+ import { MistralApiConfiguration } from "./MistralApiConfiguration.js";
+ import { failedMistralCallResponseHandler } from "./MistralError.js";
+ export class MistralTextEmbeddingModel extends AbstractModel {
+     constructor(settings) {
+         super({ settings });
+         Object.defineProperty(this, "provider", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "mistral"
+         });
+         Object.defineProperty(this, "maxValuesPerCall", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 32
+         });
+         /**
+          * Parallel calls are technically possible, but I have been hitting rate limits and disabled
+          * them for now.
+          */
+         Object.defineProperty(this, "isParallelizable", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: false
+         });
+         Object.defineProperty(this, "embeddingDimensions", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 1024
+         });
+     }
+     get modelName() {
+         return this.settings.model;
+     }
+     async callAPI(texts, options) {
+         if (texts.length > this.maxValuesPerCall) {
+             throw new Error(`The Mistral embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
+         }
+         const api = this.settings.api ?? new MistralApiConfiguration();
+         const abortSignal = options?.run?.abortSignal;
+         const model = this.settings.model;
+         const encodingFormat = this.settings.encodingFormat ?? "float";
+         return callWithRetryAndThrottle({
+             retry: this.settings.api?.retry,
+             throttle: this.settings.api?.throttle,
+             call: async () => postJsonToApi({
+                 url: api.assembleUrl(`/embeddings`),
+                 headers: api.headers,
+                 body: {
+                     model,
+                     input: texts,
+                     encoding_format: encodingFormat,
+                 },
+                 failedResponseHandler: failedMistralCallResponseHandler,
+                 successfulResponseHandler: createJsonResponseHandler(MistralTextEmbeddingResponseSchema),
+                 abortSignal,
+             }),
+         });
+     }
+     get settingsForEvent() {
+         return {
+             encodingFormat: this.settings.encodingFormat,
+         };
+     }
+     async doEmbedValues(texts, options) {
+         const response = await this.callAPI(texts, options);
+         return {
+             response,
+             embeddings: response.data.map((entry) => entry.embedding),
+         };
+     }
+     withSettings(additionalSettings) {
+         return new MistralTextEmbeddingModel(Object.assign({}, this.settings, additionalSettings));
+     }
+ }
+ const MistralTextEmbeddingResponseSchema = z.object({
+     id: z.string(),
+     object: z.string(),
+     data: z.array(z.object({
+         object: z.string(),
+         embedding: z.array(z.number()),
+         index: z.number(),
+     })),
+     model: z.string(),
+     usage: z.object({
+         prompt_tokens: z.number(),
+         total_tokens: z.number(),
+     }),
+ });
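For context, here is a minimal usage sketch (not part of the diff) for the new embedding model. It assumes modelfusion re-exports MistralTextEmbeddingModel from its package root and that the top-level embedMany(model, values) helper is used; "mistral-embed" is Mistral's embedding model id and is an assumption here, since MistralTextEmbeddingModel.d.ts is not shown in this section:

import { embedMany, MistralTextEmbeddingModel } from "modelfusion";

// Embeds up to 32 texts per call (maxValuesPerCall above); each result is a
// 1024-dimensional vector (embeddingDimensions above).
const embeddings = await embedMany(
  new MistralTextEmbeddingModel({ model: "mistral-embed" }), // model id is an assumption
  ["a sunny day at the beach", "a rainy day in the city"]
);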
package/model-provider/mistral/MistralTextGenerationModel.cjs
@@ -0,0 +1,254 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.MistralTextGenerationResponseFormat = exports.MistralTextGenerationModel = void 0;
+ const zod_1 = require("zod");
+ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
+ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
+ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
+ const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
+ const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+ const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+ const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
+ const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
+ const MistralApiConfiguration_js_1 = require("./MistralApiConfiguration.cjs");
+ const MistralError_js_1 = require("./MistralError.cjs");
+ const MistralPromptTemplate_js_1 = require("./MistralPromptTemplate.cjs");
+ class MistralTextGenerationModel extends AbstractModel_js_1.AbstractModel {
+     constructor(settings) {
+         super({ settings });
+         Object.defineProperty(this, "provider", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "mistral"
+         });
+         Object.defineProperty(this, "contextWindowSize", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: undefined
+         });
+         Object.defineProperty(this, "tokenizer", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: undefined
+         });
+         Object.defineProperty(this, "countPromptTokens", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: undefined
+         });
+     }
+     get modelName() {
+         return this.settings.model;
+     }
+     async callAPI(prompt, options) {
+         const { model, temperature, topP, safeMode, randomSeed, maxCompletionTokens, } = this.settings;
+         const api = this.settings.api ?? new MistralApiConfiguration_js_1.MistralApiConfiguration();
+         const abortSignal = options.run?.abortSignal;
+         const stream = options.responseFormat.stream;
+         const successfulResponseHandler = options.responseFormat.handler;
+         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
+             retry: api.retry,
+             throttle: api.throttle,
+             call: async () => (0, postToApi_js_1.postJsonToApi)({
+                 url: api.assembleUrl(`/chat/completions`),
+                 headers: api.headers,
+                 body: {
+                     stream,
+                     messages: prompt,
+                     model,
+                     temperature,
+                     top_p: topP,
+                     max_tokens: maxCompletionTokens,
+                     safe_mode: safeMode,
+                     random_seed: randomSeed,
+                 },
+                 failedResponseHandler: MistralError_js_1.failedMistralCallResponseHandler,
+                 successfulResponseHandler,
+                 abortSignal,
+             }),
+         });
+     }
+     get settingsForEvent() {
+         const eventSettingProperties = [
+             "maxCompletionTokens",
+             "temperature",
+             "topP",
+             "safeMode",
+             "randomSeed",
+         ];
+         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+     }
+     async doGenerateText(prompt, options) {
+         const response = await this.callAPI(prompt, {
+             ...options,
+             responseFormat: exports.MistralTextGenerationResponseFormat.json,
+         });
+         return {
+             response,
+             text: response.choices[0].message.content,
+         };
+     }
+     doStreamText(prompt, options) {
+         return this.callAPI(prompt, {
+             ...options,
+             responseFormat: exports.MistralTextGenerationResponseFormat.textDeltaIterable,
+         });
+     }
+     /**
+      * Returns this model with a text prompt template.
+      */
+     withTextPrompt() {
+         return this.withPromptTemplate((0, MistralPromptTemplate_js_1.text)());
+     }
+     /**
+      * Returns this model with an instruction prompt template.
+      */
+     withInstructionPrompt() {
+         return this.withPromptTemplate((0, MistralPromptTemplate_js_1.instruction)());
+     }
+     /**
+      * Returns this model with a chat prompt template.
+      */
+     withChatPrompt() {
+         return this.withPromptTemplate((0, MistralPromptTemplate_js_1.chat)());
+     }
+     withPromptTemplate(promptTemplate) {
+         return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
+             model: this,
+             promptTemplate,
+         });
+     }
+     withSettings(additionalSettings) {
+         return new MistralTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
+     }
+ }
+ exports.MistralTextGenerationModel = MistralTextGenerationModel;
+ const mistralTextGenerationResponseSchema = zod_1.z.object({
+     id: zod_1.z.string(),
+     object: zod_1.z.string(),
+     created: zod_1.z.number(),
+     model: zod_1.z.string(),
+     choices: zod_1.z.array(zod_1.z.object({
+         index: zod_1.z.number(),
+         message: zod_1.z.object({
+             role: zod_1.z.enum(["user", "assistant"]),
+             content: zod_1.z.string(),
+         }),
+         finish_reason: zod_1.z.enum(["stop", "length", "model_length"]),
+     })),
+     usage: zod_1.z.object({
+         prompt_tokens: zod_1.z.number(),
+         completion_tokens: zod_1.z.number(),
+         total_tokens: zod_1.z.number(),
+     }),
+ });
+ exports.MistralTextGenerationResponseFormat = {
+     /**
+      * Returns the response as a JSON object.
+      */
+     json: {
+         stream: false,
+         handler: (0, postToApi_js_1.createJsonResponseHandler)(mistralTextGenerationResponseSchema),
+     },
+     /**
+      * Returns an async iterable over the text deltas (only the text delta of the first choice).
+      */
+     textDeltaIterable: {
+         stream: true,
+         handler: async ({ response }) => createMistralTextGenerationDeltaIterableQueue(response.body, (delta) => delta[0]?.delta.content ?? ""),
+     },
+ };
+ const mistralTextGenerationChunkSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
+     id: zod_1.z.string(),
+     object: zod_1.z.string().optional(),
+     created: zod_1.z.number().optional(),
+     model: zod_1.z.string(),
+     choices: zod_1.z.array(zod_1.z.object({
+         index: zod_1.z.number(),
+         delta: zod_1.z.object({
+             role: zod_1.z.enum(["assistant", "user"]).optional().nullable(),
+             content: zod_1.z.string().nullable().optional(),
+         }),
+         finish_reason: zod_1.z
+             .enum(["stop", "length", "model_length"])
+             .nullable()
+             .optional(),
+     })),
+ }));
+ async function createMistralTextGenerationDeltaIterableQueue(stream, extractDeltaValue) {
+     const queue = new AsyncQueue_js_1.AsyncQueue();
+     const streamDelta = [];
+     // process the stream asynchronously (no 'await' on purpose):
+     (0, parseEventSourceStream_js_1.parseEventSourceStream)({ stream })
+         .then(async (events) => {
+         try {
+             for await (const event of events) {
+                 const data = event.data;
+                 if (data === "[DONE]") {
+                     queue.close();
+                     return;
+                 }
+                 const parseResult = (0, parseJSON_js_1.safeParseJSON)({
+                     text: data,
+                     schema: mistralTextGenerationChunkSchema,
+                 });
+                 if (!parseResult.success) {
+                     queue.push({
+                         type: "error",
+                         error: parseResult.error,
+                     });
+                     // Note: the queue is not closed on purpose. Some providers might add additional
+                     // chunks that are not parsable, and ModelFusion should be resilient to that.
+                     continue;
+                 }
+                 const completionChunk = parseResult.data;
+                 for (let i = 0; i < completionChunk.choices.length; i++) {
+                     const eventChoice = completionChunk.choices[i];
+                     const delta = eventChoice.delta;
+                     if (streamDelta[i] == null) {
+                         streamDelta[i] = {
+                             role: undefined,
+                             content: "",
+                             isComplete: false,
+                             delta,
+                         };
+                     }
+                     const choice = streamDelta[i];
+                     choice.delta = delta;
+                     if (eventChoice.finish_reason != null) {
+                         choice.isComplete = true;
+                     }
+                     if (delta.content != undefined) {
+                         choice.content += delta.content;
+                     }
+                     if (delta.role != undefined) {
+                         choice.role = delta.role;
+                     }
+                 }
+                 // Since we're mutating the choices array in an async scenario,
+                 // we need to make a deep copy:
+                 const streamDeltaDeepCopy = JSON.parse(JSON.stringify(streamDelta));
+                 queue.push({
+                     type: "delta",
+                     fullDelta: streamDeltaDeepCopy,
+                     valueDelta: extractDeltaValue(streamDeltaDeepCopy),
+                 });
+             }
+         }
+         catch (error) {
+             queue.push({ type: "error", error });
+             queue.close();
+             return;
+         }
+     })
+         .catch((error) => {
+         queue.push({ type: "error", error });
+         queue.close();
+         return;
+     });
+     return queue;
+ }
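A hedged usage sketch (not part of the diff) for streaming with the new model. It assumes a package-root re-export and modelfusion's streamText(model, prompt) helper, which consumes the Promise<AsyncIterable<Delta<string>>> that doStreamText returns; the raw prompt is the message array typed in the .d.ts below:

import { streamText, MistralTextGenerationModel } from "modelfusion";

const textStream = await streamText(
  new MistralTextGenerationModel({ model: "mistral-tiny", temperature: 0.7 }),
  [{ role: "user", content: "Write a short story about a robot learning to cook:" }]
);

// Each delta is produced by the SSE queue built in
// createMistralTextGenerationDeltaIterableQueue above.
for await (const textDelta of textStream) {
  process.stdout.write(textDelta);
}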
package/model-provider/mistral/MistralTextGenerationModel.d.ts
@@ -0,0 +1,231 @@
+ import { z } from "zod";
+ import { FunctionOptions } from "../../core/FunctionOptions.js";
+ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
+ import { ResponseHandler } from "../../core/api/postToApi.js";
+ import { AbstractModel } from "../../model-function/AbstractModel.js";
+ import { Delta } from "../../model-function/Delta.js";
+ import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+ export type MistralTextGenerationPrompt = Array<{
+     role: "system" | "user" | "assistant";
+     content: string;
+ }>;
+ export interface MistralTextGenerationModelSettings extends TextGenerationModelSettings {
+     api?: ApiConfiguration;
+     model: "mistral-tiny" | "mistral-small" | "mistral-medium";
+     /**
+      * What sampling temperature to use, between 0.0 and 2.0.
+      * Higher values like 0.8 will make the output more random,
+      * while lower values like 0.2 will make it more focused and deterministic.
+      *
+      * We generally recommend altering this or top_p but not both.
+      *
+      * Default: 0.7
+      */
+     temperature?: number | null;
+     /**
+      * Nucleus sampling, where the model considers the results of the tokens
+      * with top_p probability mass. So 0.1 means only the tokens comprising
+      * the top 10% probability mass are considered.
+      *
+      * We generally recommend altering this or temperature but not both.
+      *
+      * Default: 1
+      */
+     topP?: number;
+     /**
+      * Whether to inject a safety prompt before all conversations.
+      *
+      * Default: false
+      */
+     safeMode?: boolean;
+     /**
+      * The seed to use for random sampling. If set, different calls will
+      * generate deterministic results.
+      */
+     randomSeed?: number | null;
+ }
+ export declare class MistralTextGenerationModel extends AbstractModel<MistralTextGenerationModelSettings> implements TextStreamingModel<MistralTextGenerationPrompt, MistralTextGenerationModelSettings> {
+     constructor(settings: MistralTextGenerationModelSettings);
+     readonly provider = "mistral";
+     get modelName(): "mistral-tiny" | "mistral-small" | "mistral-medium";
+     readonly contextWindowSize: undefined;
+     readonly tokenizer: undefined;
+     readonly countPromptTokens: undefined;
+     callAPI<RESULT>(prompt: MistralTextGenerationPrompt, options: {
+         responseFormat: MistralTextGenerationResponseFormatType<RESULT>;
+     } & FunctionOptions): Promise<RESULT>;
+     get settingsForEvent(): Partial<MistralTextGenerationModelSettings>;
+     doGenerateText(prompt: MistralTextGenerationPrompt, options?: FunctionOptions): Promise<{
+         response: {
+             object: string;
+             usage: {
+                 prompt_tokens: number;
+                 total_tokens: number;
+                 completion_tokens: number;
+             };
+             model: string;
+             id: string;
+             created: number;
+             choices: {
+                 message: {
+                     role: "user" | "assistant";
+                     content: string;
+                 };
+                 finish_reason: "length" | "stop" | "model_length";
+                 index: number;
+             }[];
+         };
+         text: string;
+     }>;
+     doStreamText(prompt: MistralTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+     /**
+      * Returns this model with a text prompt template.
+      */
+     withTextPrompt(): PromptTemplateTextStreamingModel<string, MistralTextGenerationPrompt, MistralTextGenerationModelSettings, this>;
+     /**
+      * Returns this model with an instruction prompt template.
+      */
+     withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextInstructionPrompt, MistralTextGenerationPrompt, MistralTextGenerationModelSettings, this>;
+     /**
+      * Returns this model with a chat prompt template.
+      */
+     withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextChatPrompt, MistralTextGenerationPrompt, MistralTextGenerationModelSettings, this>;
+     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, MistralTextGenerationPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, MistralTextGenerationPrompt, MistralTextGenerationModelSettings, this>;
+     withSettings(additionalSettings: Partial<MistralTextGenerationModelSettings>): this;
+ }
+ declare const mistralTextGenerationResponseSchema: z.ZodObject<{
+     id: z.ZodString;
+     object: z.ZodString;
+     created: z.ZodNumber;
+     model: z.ZodString;
+     choices: z.ZodArray<z.ZodObject<{
+         index: z.ZodNumber;
+         message: z.ZodObject<{
+             role: z.ZodEnum<["user", "assistant"]>;
+             content: z.ZodString;
+         }, "strip", z.ZodTypeAny, {
+             role: "user" | "assistant";
+             content: string;
+         }, {
+             role: "user" | "assistant";
+             content: string;
+         }>;
+         finish_reason: z.ZodEnum<["stop", "length", "model_length"]>;
+     }, "strip", z.ZodTypeAny, {
+         message: {
+             role: "user" | "assistant";
+             content: string;
+         };
+         finish_reason: "length" | "stop" | "model_length";
+         index: number;
+     }, {
+         message: {
+             role: "user" | "assistant";
+             content: string;
+         };
+         finish_reason: "length" | "stop" | "model_length";
+         index: number;
+     }>, "many">;
+     usage: z.ZodObject<{
+         prompt_tokens: z.ZodNumber;
+         completion_tokens: z.ZodNumber;
+         total_tokens: z.ZodNumber;
+     }, "strip", z.ZodTypeAny, {
+         prompt_tokens: number;
+         total_tokens: number;
+         completion_tokens: number;
+     }, {
+         prompt_tokens: number;
+         total_tokens: number;
+         completion_tokens: number;
+     }>;
+ }, "strip", z.ZodTypeAny, {
+     object: string;
+     usage: {
+         prompt_tokens: number;
+         total_tokens: number;
+         completion_tokens: number;
+     };
+     model: string;
+     id: string;
+     created: number;
+     choices: {
+         message: {
+             role: "user" | "assistant";
+             content: string;
+         };
+         finish_reason: "length" | "stop" | "model_length";
+         index: number;
+     }[];
+ }, {
+     object: string;
+     usage: {
+         prompt_tokens: number;
+         total_tokens: number;
+         completion_tokens: number;
+     };
+     model: string;
+     id: string;
+     created: number;
+     choices: {
+         message: {
+             role: "user" | "assistant";
+             content: string;
+         };
+         finish_reason: "length" | "stop" | "model_length";
+         index: number;
+     }[];
+ }>;
+ export type MistralTextGenerationResponse = z.infer<typeof mistralTextGenerationResponseSchema>;
+ export type MistralTextGenerationResponseFormatType<T> = {
+     stream: boolean;
+     handler: ResponseHandler<T>;
+ };
+ export declare const MistralTextGenerationResponseFormat: {
+     /**
+      * Returns the response as a JSON object.
+      */
+     json: {
+         stream: false;
+         handler: ResponseHandler<{
+             object: string;
+             usage: {
+                 prompt_tokens: number;
+                 total_tokens: number;
+                 completion_tokens: number;
+             };
+             model: string;
+             id: string;
+             created: number;
+             choices: {
+                 message: {
+                     role: "user" | "assistant";
+                     content: string;
+                 };
+                 finish_reason: "length" | "stop" | "model_length";
+                 index: number;
+             }[];
+         }>;
+     };
+     /**
+      * Returns an async iterable over the text deltas (only the text delta of the first choice).
+      */
+     textDeltaIterable: {
+         stream: true;
+         handler: ({ response }: {
+             response: Response;
+         }) => Promise<AsyncIterable<Delta<string>>>;
+     };
+ };
+ export type MistralTextGenerationDelta = Array<{
+     role: "assistant" | "user" | undefined;
+     content: string;
+     isComplete: boolean;
+     delta: {
+         role?: "assistant" | "user" | null;
+         content?: string | null;
+     };
+ }>;
+ export {};
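The with*Prompt methods typed above wrap the model in a PromptTemplateTextStreamingModel so it accepts higher-level prompt shapes instead of the raw message array. A hedged sketch (not part of the diff), assuming a package-root re-export and a generateText(model, prompt) helper; that MistralPromptTemplate.text() maps a plain string to a single user message is an assumption, since MistralPromptTemplate is not shown in this section:

import { generateText, MistralTextGenerationModel } from "modelfusion";

const model = new MistralTextGenerationModel({
  model: "mistral-small",
  maxCompletionTokens: 120,
}).withTextPrompt(); // PromptTemplateTextStreamingModel<string, MistralTextGenerationPrompt, ...>

// The text template presumably expands the string into
// [{ role: "user", content: "..." }] before callAPI runs.
const text = await generateText(model, "Explain nucleus sampling in one sentence.");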