@ai-sdk/mistral 3.0.8 → 3.0.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.js +1 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1 -1
- package/dist/index.mjs.map +1 -1
- package/package.json +5 -4
- package/src/__fixtures__/mistral-generate-text.1.json +22 -0
- package/src/__snapshots__/convert-to-mistral-chat-messages.test.ts.snap +57 -0
- package/src/__snapshots__/mistral-embedding-model.test.ts.snap +44 -0
- package/src/convert-mistral-usage.ts +46 -0
- package/src/convert-to-mistral-chat-messages.test.ts +372 -0
- package/src/convert-to-mistral-chat-messages.ts +163 -0
- package/src/get-response-metadata.ts +15 -0
- package/src/index.ts +7 -0
- package/src/map-mistral-finish-reason.ts +17 -0
- package/src/mistral-chat-language-model.test.ts +1755 -0
- package/src/mistral-chat-language-model.ts +580 -0
- package/src/mistral-chat-options.ts +63 -0
- package/src/mistral-chat-prompt.ts +46 -0
- package/src/mistral-embedding-model.test.ts +127 -0
- package/src/mistral-embedding-model.ts +94 -0
- package/src/mistral-embedding-options.ts +1 -0
- package/src/mistral-error.ts +17 -0
- package/src/mistral-prepare-tools.test.ts +178 -0
- package/src/mistral-prepare-tools.ts +97 -0
- package/src/mistral-provider.ts +147 -0
- package/src/version.ts +6 -0
|
@@ -0,0 +1,580 @@
|
|
|
1
|
+
import {
|
|
2
|
+
LanguageModelV3,
|
|
3
|
+
LanguageModelV3CallOptions,
|
|
4
|
+
LanguageModelV3Content,
|
|
5
|
+
LanguageModelV3FinishReason,
|
|
6
|
+
LanguageModelV3GenerateResult,
|
|
7
|
+
LanguageModelV3StreamPart,
|
|
8
|
+
LanguageModelV3StreamResult,
|
|
9
|
+
SharedV3Warning,
|
|
10
|
+
} from '@ai-sdk/provider';
|
|
11
|
+
import {
|
|
12
|
+
combineHeaders,
|
|
13
|
+
createEventSourceResponseHandler,
|
|
14
|
+
createJsonResponseHandler,
|
|
15
|
+
FetchFunction,
|
|
16
|
+
generateId,
|
|
17
|
+
injectJsonInstructionIntoMessages,
|
|
18
|
+
parseProviderOptions,
|
|
19
|
+
ParseResult,
|
|
20
|
+
postJsonToApi,
|
|
21
|
+
} from '@ai-sdk/provider-utils';
|
|
22
|
+
import { z } from 'zod/v4';
|
|
23
|
+
import { convertMistralUsage, MistralUsage } from './convert-mistral-usage';
|
|
24
|
+
import { convertToMistralChatMessages } from './convert-to-mistral-chat-messages';
|
|
25
|
+
import { getResponseMetadata } from './get-response-metadata';
|
|
26
|
+
import { mapMistralFinishReason } from './map-mistral-finish-reason';
|
|
27
|
+
import {
|
|
28
|
+
MistralChatModelId,
|
|
29
|
+
mistralLanguageModelOptions,
|
|
30
|
+
} from './mistral-chat-options';
|
|
31
|
+
import { mistralFailedResponseHandler } from './mistral-error';
|
|
32
|
+
import { prepareTools } from './mistral-prepare-tools';
|
|
33
|
+
|
|
34
|
+
// Configuration shared by Mistral chat language model instances,
// supplied by the provider factory.
type MistralChatConfig = {
  // provider identifier used in result metadata
  provider: string;
  // base URL of the Mistral API; '/chat/completions' is appended by the model
  baseURL: string;
  // lazily resolved request headers (resolved per request)
  headers: () => Record<string, string | undefined>;
  // optional custom fetch implementation (e.g. for testing or proxying)
  fetch?: FetchFunction;
  // optional custom id generator; defaults to generateId from provider-utils
  generateId?: () => string;
};
|
|
41
|
+
|
|
42
|
+
export class MistralChatLanguageModel implements LanguageModelV3 {
|
|
43
|
+
readonly specificationVersion = 'v3';
|
|
44
|
+
|
|
45
|
+
readonly modelId: MistralChatModelId;
|
|
46
|
+
|
|
47
|
+
private readonly config: MistralChatConfig;
|
|
48
|
+
private readonly generateId: () => string;
|
|
49
|
+
|
|
50
|
+
constructor(modelId: MistralChatModelId, config: MistralChatConfig) {
|
|
51
|
+
this.modelId = modelId;
|
|
52
|
+
this.config = config;
|
|
53
|
+
this.generateId = config.generateId ?? generateId;
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
get provider(): string {
|
|
57
|
+
return this.config.provider;
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
readonly supportedUrls: Record<string, RegExp[]> = {
|
|
61
|
+
'application/pdf': [/^https:\/\/.*$/],
|
|
62
|
+
};
|
|
63
|
+
|
|
64
|
+
private async getArgs({
|
|
65
|
+
prompt,
|
|
66
|
+
maxOutputTokens,
|
|
67
|
+
temperature,
|
|
68
|
+
topP,
|
|
69
|
+
topK,
|
|
70
|
+
frequencyPenalty,
|
|
71
|
+
presencePenalty,
|
|
72
|
+
stopSequences,
|
|
73
|
+
responseFormat,
|
|
74
|
+
seed,
|
|
75
|
+
providerOptions,
|
|
76
|
+
tools,
|
|
77
|
+
toolChoice,
|
|
78
|
+
}: LanguageModelV3CallOptions) {
|
|
79
|
+
const warnings: SharedV3Warning[] = [];
|
|
80
|
+
|
|
81
|
+
const options =
|
|
82
|
+
(await parseProviderOptions({
|
|
83
|
+
provider: 'mistral',
|
|
84
|
+
providerOptions,
|
|
85
|
+
schema: mistralLanguageModelOptions,
|
|
86
|
+
})) ?? {};
|
|
87
|
+
|
|
88
|
+
if (topK != null) {
|
|
89
|
+
warnings.push({ type: 'unsupported', feature: 'topK' });
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
if (frequencyPenalty != null) {
|
|
93
|
+
warnings.push({ type: 'unsupported', feature: 'frequencyPenalty' });
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
if (presencePenalty != null) {
|
|
97
|
+
warnings.push({ type: 'unsupported', feature: 'presencePenalty' });
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
if (stopSequences != null) {
|
|
101
|
+
warnings.push({ type: 'unsupported', feature: 'stopSequences' });
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
const structuredOutputs = options.structuredOutputs ?? true;
|
|
105
|
+
const strictJsonSchema = options.strictJsonSchema ?? false;
|
|
106
|
+
|
|
107
|
+
// For Mistral we need to need to instruct the model to return a JSON object.
|
|
108
|
+
// https://docs.mistral.ai/capabilities/structured-output/structured_output_overview/
|
|
109
|
+
if (responseFormat?.type === 'json' && !responseFormat?.schema) {
|
|
110
|
+
prompt = injectJsonInstructionIntoMessages({
|
|
111
|
+
messages: prompt,
|
|
112
|
+
schema: responseFormat.schema,
|
|
113
|
+
});
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
const baseArgs = {
|
|
117
|
+
// model id:
|
|
118
|
+
model: this.modelId,
|
|
119
|
+
|
|
120
|
+
// model specific settings:
|
|
121
|
+
safe_prompt: options.safePrompt,
|
|
122
|
+
|
|
123
|
+
// standardized settings:
|
|
124
|
+
max_tokens: maxOutputTokens,
|
|
125
|
+
temperature,
|
|
126
|
+
top_p: topP,
|
|
127
|
+
random_seed: seed,
|
|
128
|
+
|
|
129
|
+
// response format:
|
|
130
|
+
response_format:
|
|
131
|
+
responseFormat?.type === 'json'
|
|
132
|
+
? structuredOutputs && responseFormat?.schema != null
|
|
133
|
+
? {
|
|
134
|
+
type: 'json_schema',
|
|
135
|
+
json_schema: {
|
|
136
|
+
schema: responseFormat.schema,
|
|
137
|
+
strict: strictJsonSchema,
|
|
138
|
+
name: responseFormat.name ?? 'response',
|
|
139
|
+
description: responseFormat.description,
|
|
140
|
+
},
|
|
141
|
+
}
|
|
142
|
+
: { type: 'json_object' }
|
|
143
|
+
: undefined,
|
|
144
|
+
|
|
145
|
+
// mistral-specific provider options:
|
|
146
|
+
document_image_limit: options.documentImageLimit,
|
|
147
|
+
document_page_limit: options.documentPageLimit,
|
|
148
|
+
|
|
149
|
+
// messages:
|
|
150
|
+
messages: convertToMistralChatMessages(prompt),
|
|
151
|
+
};
|
|
152
|
+
|
|
153
|
+
const {
|
|
154
|
+
tools: mistralTools,
|
|
155
|
+
toolChoice: mistralToolChoice,
|
|
156
|
+
toolWarnings,
|
|
157
|
+
} = prepareTools({
|
|
158
|
+
tools,
|
|
159
|
+
toolChoice,
|
|
160
|
+
});
|
|
161
|
+
|
|
162
|
+
return {
|
|
163
|
+
args: {
|
|
164
|
+
...baseArgs,
|
|
165
|
+
tools: mistralTools,
|
|
166
|
+
tool_choice: mistralToolChoice,
|
|
167
|
+
...(mistralTools != null && options.parallelToolCalls !== undefined
|
|
168
|
+
? { parallel_tool_calls: options.parallelToolCalls }
|
|
169
|
+
: {}),
|
|
170
|
+
},
|
|
171
|
+
warnings: [...warnings, ...toolWarnings],
|
|
172
|
+
};
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
async doGenerate(
|
|
176
|
+
options: LanguageModelV3CallOptions,
|
|
177
|
+
): Promise<LanguageModelV3GenerateResult> {
|
|
178
|
+
const { args: body, warnings } = await this.getArgs(options);
|
|
179
|
+
|
|
180
|
+
const {
|
|
181
|
+
responseHeaders,
|
|
182
|
+
value: response,
|
|
183
|
+
rawValue: rawResponse,
|
|
184
|
+
} = await postJsonToApi({
|
|
185
|
+
url: `${this.config.baseURL}/chat/completions`,
|
|
186
|
+
headers: combineHeaders(this.config.headers(), options.headers),
|
|
187
|
+
body,
|
|
188
|
+
failedResponseHandler: mistralFailedResponseHandler,
|
|
189
|
+
successfulResponseHandler: createJsonResponseHandler(
|
|
190
|
+
mistralChatResponseSchema,
|
|
191
|
+
),
|
|
192
|
+
abortSignal: options.abortSignal,
|
|
193
|
+
fetch: this.config.fetch,
|
|
194
|
+
});
|
|
195
|
+
|
|
196
|
+
const choice = response.choices[0];
|
|
197
|
+
const content: Array<LanguageModelV3Content> = [];
|
|
198
|
+
|
|
199
|
+
// process content parts in order to preserve sequence
|
|
200
|
+
if (
|
|
201
|
+
choice.message.content != null &&
|
|
202
|
+
Array.isArray(choice.message.content)
|
|
203
|
+
) {
|
|
204
|
+
for (const part of choice.message.content) {
|
|
205
|
+
if (part.type === 'thinking') {
|
|
206
|
+
const reasoningText = extractReasoningContent(part.thinking);
|
|
207
|
+
if (reasoningText.length > 0) {
|
|
208
|
+
content.push({ type: 'reasoning', text: reasoningText });
|
|
209
|
+
}
|
|
210
|
+
} else if (part.type === 'text') {
|
|
211
|
+
if (part.text.length > 0) {
|
|
212
|
+
content.push({ type: 'text', text: part.text });
|
|
213
|
+
}
|
|
214
|
+
}
|
|
215
|
+
}
|
|
216
|
+
} else {
|
|
217
|
+
// handle legacy string content
|
|
218
|
+
const text = extractTextContent(choice.message.content);
|
|
219
|
+
if (text != null && text.length > 0) {
|
|
220
|
+
content.push({ type: 'text', text });
|
|
221
|
+
}
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
// when there is a trailing assistant message, mistral will send the
|
|
225
|
+
// content of that message again. we skip this repeated content to
|
|
226
|
+
// avoid duplication, e.g. in continuation mode.
|
|
227
|
+
|
|
228
|
+
// tool calls:
|
|
229
|
+
if (choice.message.tool_calls != null) {
|
|
230
|
+
for (const toolCall of choice.message.tool_calls) {
|
|
231
|
+
content.push({
|
|
232
|
+
type: 'tool-call',
|
|
233
|
+
toolCallId: toolCall.id,
|
|
234
|
+
toolName: toolCall.function.name,
|
|
235
|
+
input: toolCall.function.arguments!,
|
|
236
|
+
});
|
|
237
|
+
}
|
|
238
|
+
}
|
|
239
|
+
|
|
240
|
+
return {
|
|
241
|
+
content,
|
|
242
|
+
finishReason: {
|
|
243
|
+
unified: mapMistralFinishReason(choice.finish_reason),
|
|
244
|
+
raw: choice.finish_reason ?? undefined,
|
|
245
|
+
},
|
|
246
|
+
usage: convertMistralUsage(response.usage),
|
|
247
|
+
request: { body },
|
|
248
|
+
response: {
|
|
249
|
+
...getResponseMetadata(response),
|
|
250
|
+
headers: responseHeaders,
|
|
251
|
+
body: rawResponse,
|
|
252
|
+
},
|
|
253
|
+
warnings,
|
|
254
|
+
};
|
|
255
|
+
}
|
|
256
|
+
|
|
257
|
+
async doStream(
|
|
258
|
+
options: LanguageModelV3CallOptions,
|
|
259
|
+
): Promise<LanguageModelV3StreamResult> {
|
|
260
|
+
const { args, warnings } = await this.getArgs(options);
|
|
261
|
+
const body = { ...args, stream: true };
|
|
262
|
+
|
|
263
|
+
const { responseHeaders, value: response } = await postJsonToApi({
|
|
264
|
+
url: `${this.config.baseURL}/chat/completions`,
|
|
265
|
+
headers: combineHeaders(this.config.headers(), options.headers),
|
|
266
|
+
body,
|
|
267
|
+
failedResponseHandler: mistralFailedResponseHandler,
|
|
268
|
+
successfulResponseHandler: createEventSourceResponseHandler(
|
|
269
|
+
mistralChatChunkSchema,
|
|
270
|
+
),
|
|
271
|
+
abortSignal: options.abortSignal,
|
|
272
|
+
fetch: this.config.fetch,
|
|
273
|
+
});
|
|
274
|
+
|
|
275
|
+
let finishReason: LanguageModelV3FinishReason = {
|
|
276
|
+
unified: 'other',
|
|
277
|
+
raw: undefined,
|
|
278
|
+
};
|
|
279
|
+
let usage: MistralUsage | undefined = undefined;
|
|
280
|
+
|
|
281
|
+
let isFirstChunk = true;
|
|
282
|
+
let activeText = false;
|
|
283
|
+
let activeReasoningId: string | null = null;
|
|
284
|
+
|
|
285
|
+
const generateId = this.generateId;
|
|
286
|
+
|
|
287
|
+
return {
|
|
288
|
+
stream: response.pipeThrough(
|
|
289
|
+
new TransformStream<
|
|
290
|
+
ParseResult<z.infer<typeof mistralChatChunkSchema>>,
|
|
291
|
+
LanguageModelV3StreamPart
|
|
292
|
+
>({
|
|
293
|
+
start(controller) {
|
|
294
|
+
controller.enqueue({ type: 'stream-start', warnings });
|
|
295
|
+
},
|
|
296
|
+
|
|
297
|
+
transform(chunk, controller) {
|
|
298
|
+
// Emit raw chunk if requested (before anything else)
|
|
299
|
+
if (options.includeRawChunks) {
|
|
300
|
+
controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });
|
|
301
|
+
}
|
|
302
|
+
|
|
303
|
+
if (!chunk.success) {
|
|
304
|
+
controller.enqueue({ type: 'error', error: chunk.error });
|
|
305
|
+
return;
|
|
306
|
+
}
|
|
307
|
+
|
|
308
|
+
const value = chunk.value;
|
|
309
|
+
|
|
310
|
+
if (isFirstChunk) {
|
|
311
|
+
isFirstChunk = false;
|
|
312
|
+
|
|
313
|
+
controller.enqueue({
|
|
314
|
+
type: 'response-metadata',
|
|
315
|
+
...getResponseMetadata(value),
|
|
316
|
+
});
|
|
317
|
+
}
|
|
318
|
+
|
|
319
|
+
if (value.usage != null) {
|
|
320
|
+
usage = value.usage;
|
|
321
|
+
}
|
|
322
|
+
|
|
323
|
+
const choice = value.choices[0];
|
|
324
|
+
const delta = choice.delta;
|
|
325
|
+
|
|
326
|
+
const textContent = extractTextContent(delta.content);
|
|
327
|
+
|
|
328
|
+
if (delta.content != null && Array.isArray(delta.content)) {
|
|
329
|
+
for (const part of delta.content) {
|
|
330
|
+
if (part.type === 'thinking') {
|
|
331
|
+
const reasoningDelta = extractReasoningContent(part.thinking);
|
|
332
|
+
if (reasoningDelta.length > 0) {
|
|
333
|
+
if (activeReasoningId == null) {
|
|
334
|
+
// end any active text before starting reasoning
|
|
335
|
+
if (activeText) {
|
|
336
|
+
controller.enqueue({ type: 'text-end', id: '0' });
|
|
337
|
+
activeText = false;
|
|
338
|
+
}
|
|
339
|
+
|
|
340
|
+
activeReasoningId = generateId();
|
|
341
|
+
controller.enqueue({
|
|
342
|
+
type: 'reasoning-start',
|
|
343
|
+
id: activeReasoningId,
|
|
344
|
+
});
|
|
345
|
+
}
|
|
346
|
+
controller.enqueue({
|
|
347
|
+
type: 'reasoning-delta',
|
|
348
|
+
id: activeReasoningId,
|
|
349
|
+
delta: reasoningDelta,
|
|
350
|
+
});
|
|
351
|
+
}
|
|
352
|
+
}
|
|
353
|
+
}
|
|
354
|
+
}
|
|
355
|
+
|
|
356
|
+
if (textContent != null && textContent.length > 0) {
|
|
357
|
+
if (!activeText) {
|
|
358
|
+
// if we were in reasoning mode, end it before starting text
|
|
359
|
+
if (activeReasoningId != null) {
|
|
360
|
+
controller.enqueue({
|
|
361
|
+
type: 'reasoning-end',
|
|
362
|
+
id: activeReasoningId,
|
|
363
|
+
});
|
|
364
|
+
activeReasoningId = null;
|
|
365
|
+
}
|
|
366
|
+
controller.enqueue({ type: 'text-start', id: '0' });
|
|
367
|
+
activeText = true;
|
|
368
|
+
}
|
|
369
|
+
|
|
370
|
+
controller.enqueue({
|
|
371
|
+
type: 'text-delta',
|
|
372
|
+
id: '0',
|
|
373
|
+
delta: textContent,
|
|
374
|
+
});
|
|
375
|
+
}
|
|
376
|
+
|
|
377
|
+
if (delta?.tool_calls != null) {
|
|
378
|
+
for (const toolCall of delta.tool_calls) {
|
|
379
|
+
const toolCallId = toolCall.id;
|
|
380
|
+
const toolName = toolCall.function.name;
|
|
381
|
+
const input = toolCall.function.arguments;
|
|
382
|
+
|
|
383
|
+
controller.enqueue({
|
|
384
|
+
type: 'tool-input-start',
|
|
385
|
+
id: toolCallId,
|
|
386
|
+
toolName,
|
|
387
|
+
});
|
|
388
|
+
|
|
389
|
+
controller.enqueue({
|
|
390
|
+
type: 'tool-input-delta',
|
|
391
|
+
id: toolCallId,
|
|
392
|
+
delta: input,
|
|
393
|
+
});
|
|
394
|
+
|
|
395
|
+
controller.enqueue({
|
|
396
|
+
type: 'tool-input-end',
|
|
397
|
+
id: toolCallId,
|
|
398
|
+
});
|
|
399
|
+
|
|
400
|
+
controller.enqueue({
|
|
401
|
+
type: 'tool-call',
|
|
402
|
+
toolCallId,
|
|
403
|
+
toolName,
|
|
404
|
+
input,
|
|
405
|
+
});
|
|
406
|
+
}
|
|
407
|
+
}
|
|
408
|
+
|
|
409
|
+
if (choice.finish_reason != null) {
|
|
410
|
+
finishReason = {
|
|
411
|
+
unified: mapMistralFinishReason(choice.finish_reason),
|
|
412
|
+
raw: choice.finish_reason,
|
|
413
|
+
};
|
|
414
|
+
}
|
|
415
|
+
},
|
|
416
|
+
|
|
417
|
+
flush(controller) {
|
|
418
|
+
if (activeReasoningId != null) {
|
|
419
|
+
controller.enqueue({
|
|
420
|
+
type: 'reasoning-end',
|
|
421
|
+
id: activeReasoningId,
|
|
422
|
+
});
|
|
423
|
+
}
|
|
424
|
+
if (activeText) {
|
|
425
|
+
controller.enqueue({ type: 'text-end', id: '0' });
|
|
426
|
+
}
|
|
427
|
+
|
|
428
|
+
controller.enqueue({
|
|
429
|
+
type: 'finish',
|
|
430
|
+
finishReason,
|
|
431
|
+
usage: convertMistralUsage(usage),
|
|
432
|
+
});
|
|
433
|
+
},
|
|
434
|
+
}),
|
|
435
|
+
),
|
|
436
|
+
request: { body },
|
|
437
|
+
response: { headers: responseHeaders },
|
|
438
|
+
};
|
|
439
|
+
}
|
|
440
|
+
}
|
|
441
|
+
|
|
442
|
+
function extractReasoningContent(
|
|
443
|
+
thinking: Array<{ type: string; text: string }>,
|
|
444
|
+
) {
|
|
445
|
+
return thinking
|
|
446
|
+
.filter(chunk => chunk.type === 'text')
|
|
447
|
+
.map(chunk => chunk.text)
|
|
448
|
+
.join('');
|
|
449
|
+
}
|
|
450
|
+
|
|
451
|
+
function extractTextContent(content: z.infer<typeof mistralContentSchema>) {
|
|
452
|
+
if (typeof content === 'string') {
|
|
453
|
+
return content;
|
|
454
|
+
}
|
|
455
|
+
|
|
456
|
+
if (content == null) {
|
|
457
|
+
return undefined;
|
|
458
|
+
}
|
|
459
|
+
|
|
460
|
+
const textContent: string[] = [];
|
|
461
|
+
|
|
462
|
+
for (const chunk of content) {
|
|
463
|
+
const { type } = chunk;
|
|
464
|
+
|
|
465
|
+
switch (type) {
|
|
466
|
+
case 'text':
|
|
467
|
+
textContent.push(chunk.text);
|
|
468
|
+
break;
|
|
469
|
+
case 'thinking':
|
|
470
|
+
case 'image_url':
|
|
471
|
+
case 'reference':
|
|
472
|
+
// thinking, image content, and reference content are currently ignored
|
|
473
|
+
break;
|
|
474
|
+
default: {
|
|
475
|
+
const _exhaustiveCheck: never = type;
|
|
476
|
+
throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
|
|
477
|
+
}
|
|
478
|
+
}
|
|
479
|
+
}
|
|
480
|
+
|
|
481
|
+
return textContent.length ? textContent.join('') : undefined;
|
|
482
|
+
}
|
|
483
|
+
|
|
484
|
+
// Assistant message content as returned by the Mistral API: either a plain
// string (legacy form) or an array of typed content parts. Nullish content
// is allowed (e.g. tool-call-only messages).
const mistralContentSchema = z
  .union([
    z.string(),
    z.array(
      z.discriminatedUnion('type', [
        // plain text part
        z.object({
          type: z.literal('text'),
          text: z.string(),
        }),
        // image part: URL string or object with optional detail level
        z.object({
          type: z.literal('image_url'),
          image_url: z.union([
            z.string(),
            z.object({
              url: z.string(),
              detail: z.string().nullable(),
            }),
          ]),
        }),
        // reference part pointing at other content by id
        z.object({
          type: z.literal('reference'),
          reference_ids: z.array(z.union([z.string(), z.number()])),
        }),
        // reasoning ("thinking") part: a nested list of text chunks
        z.object({
          type: z.literal('thinking'),
          thinking: z.array(
            z.object({
              type: z.literal('text'),
              text: z.string(),
            }),
          ),
        }),
      ]),
    ),
  ])
  .nullish();
|
|
520
|
+
|
|
521
|
+
// Token usage statistics reported by the Mistral API.
const mistralUsageSchema = z.object({
  prompt_tokens: z.number(),
  completion_tokens: z.number(),
  total_tokens: z.number(),
});
|
|
526
|
+
|
|
527
|
+
// limited version of the schema, focussed on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
const mistralChatResponseSchema = z.object({
  id: z.string().nullish(),
  created: z.number().nullish(),
  model: z.string().nullish(),
  choices: z.array(
    z.object({
      message: z.object({
        role: z.literal('assistant'),
        content: mistralContentSchema,
        // tool calls with complete (string) arguments:
        tool_calls: z
          .array(
            z.object({
              id: z.string(),
              function: z.object({ name: z.string(), arguments: z.string() }),
            }),
          )
          .nullish(),
      }),
      index: z.number(),
      finish_reason: z.string().nullish(),
    }),
  ),
  object: z.literal('chat.completion'),
  // usage is required on non-streaming responses:
  usage: mistralUsageSchema,
});
|
|
554
|
+
|
|
555
|
+
// limited version of the schema, focussed on what is needed for the implementation
// this approach limits breakages when the API changes and increases efficiency
const mistralChatChunkSchema = z.object({
  id: z.string().nullish(),
  created: z.number().nullish(),
  model: z.string().nullish(),
  choices: z.array(
    z.object({
      delta: z.object({
        role: z.enum(['assistant']).optional(),
        content: mistralContentSchema,
        tool_calls: z
          .array(
            z.object({
              id: z.string(),
              function: z.object({ name: z.string(), arguments: z.string() }),
            }),
          )
          .nullish(),
      }),
      finish_reason: z.string().nullish(),
      index: z.number(),
    }),
  ),
  // usage only appears on some chunks (typically the final one):
  usage: mistralUsageSchema.nullish(),
});
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
import { z } from 'zod/v4';
|
|
2
|
+
|
|
3
|
+
// https://docs.mistral.ai/getting-started/models/models_overview/
export type MistralChatModelId =
  // premier
  | 'ministral-3b-latest'
  | 'ministral-8b-latest'
  | 'mistral-large-latest'
  | 'mistral-medium-latest'
  | 'mistral-medium-2508'
  | 'mistral-medium-2505'
  | 'mistral-small-latest'
  | 'pixtral-large-latest'
  // reasoning models
  | 'magistral-small-2507'
  | 'magistral-medium-2507'
  | 'magistral-small-2506'
  | 'magistral-medium-2506'
  // free
  | 'pixtral-12b-2409'
  // legacy
  | 'open-mistral-7b'
  | 'open-mixtral-8x7b'
  | 'open-mixtral-8x22b'
  // any other model id; `string & {}` keeps editor autocomplete for the
  // known ids above while still accepting arbitrary strings
  | (string & {});
|
|
26
|
+
|
|
27
|
+
// Provider options accepted under the 'mistral' key of providerOptions.
export const mistralLanguageModelOptions = z.object({
  /**
Whether to inject a safety prompt before all conversations.

Defaults to `false`.
   */
  safePrompt: z.boolean().optional(),

  // Limits for document (PDF) processing — presumably the maximum number of
  // images / pages considered per document; confirm against the Mistral
  // document-understanding API docs.
  documentImageLimit: z.number().optional(),
  documentPageLimit: z.number().optional(),

  /**
   * Whether to use structured outputs.
   *
   * @default true
   */
  structuredOutputs: z.boolean().optional(),

  /**
   * Whether to use strict JSON schema validation.
   *
   * @default false
   */
  strictJsonSchema: z.boolean().optional(),

  /**
   * Whether to enable parallel function calling during tool use.
   * When set to false, the model will use at most one tool per response.
   *
   * @default true
   */
  parallelToolCalls: z.boolean().optional(),
});

// Inferred options type for use in call sites.
export type MistralLanguageModelOptions = z.infer<
  typeof mistralLanguageModelOptions
>;
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
// Message list in the wire format of the Mistral chat completions API.
export type MistralPrompt = Array<MistralMessage>;

export type MistralMessage =
  | MistralSystemMessage
  | MistralUserMessage
  | MistralAssistantMessage
  | MistralToolMessage;

export interface MistralSystemMessage {
  role: 'system';
  content: string;
}

export interface MistralUserMessage {
  role: 'user';
  content: Array<MistralUserMessageContent>;
}

// User content parts: plain text, image URLs, and document URLs.
export type MistralUserMessageContent =
  | { type: 'text'; text: string }
  | { type: 'image_url'; image_url: string }
  | { type: 'document_url'; document_url: string };

export interface MistralAssistantMessage {
  role: 'assistant';
  content: string;
  // NOTE(review): looks like Mistral's prefix-completion flag for a trailing
  // assistant message — confirm against the Mistral API reference.
  prefix?: boolean;
  tool_calls?: Array<{
    id: string;
    type: 'function';
    function: { name: string; arguments: string };
  }>;
}

// Tool result message; tool_call_id links it to the originating call.
export interface MistralToolMessage {
  role: 'tool';
  name: string;
  content: string;
  tool_call_id: string;
}

// Tool choice: force a specific function, or one of Mistral's keywords
// ('auto' | 'none' | 'any').
export type MistralToolChoice =
  | { type: 'function'; function: { name: string } }
  | 'auto'
  | 'none'
  | 'any';
|