@ai-sdk/mistral 0.0.0-1c33ba03-20260114162300

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,888 @@
1
"use strict";
// esbuild-generated CommonJS interop helpers (bundler output — not hand-edited).
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines lazy, enumerable getters on `target` for every entry in `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copies own properties of `from` onto `to` (skipping `except` and keys already
// present), preserving each property's enumerability via its descriptor.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Marks the result as an ES module and mirrors the module's exports onto it.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/index.ts
// Public API of the package: VERSION, createMistral, and the default `mistral` instance.
var src_exports = {};
__export(src_exports, {
  VERSION: () => VERSION,
  createMistral: () => createMistral,
  mistral: () => mistral
});
module.exports = __toCommonJS(src_exports);
28
+
29
+ // src/mistral-provider.ts
30
+ var import_provider4 = require("@ai-sdk/provider");
31
+ var import_provider_utils5 = require("@ai-sdk/provider-utils");
32
+
33
+ // src/mistral-chat-language-model.ts
34
+ var import_provider_utils3 = require("@ai-sdk/provider-utils");
35
+ var import_v43 = require("zod/v4");
36
+
37
+ // src/convert-mistral-usage.ts
38
// Maps a raw Mistral usage payload onto the normalized AI SDK usage shape.
// Mistral only reports aggregate prompt/completion counts, so the cache and
// reasoning buckets are always undefined. A nullish payload yields the same
// shape with every count undefined.
function convertMistralUsage(usage) {
  const promptTokens = usage == null ? undefined : usage.prompt_tokens;
  const completionTokens = usage == null ? undefined : usage.completion_tokens;
  return {
    inputTokens: {
      total: promptTokens,
      noCache: promptTokens,
      cacheRead: undefined,
      cacheWrite: undefined
    },
    outputTokens: {
      total: completionTokens,
      text: completionTokens,
      reasoning: undefined
    },
    raw: usage == null ? undefined : usage
  };
}
72
+
73
+ // src/convert-to-mistral-chat-messages.ts
74
+ var import_provider = require("@ai-sdk/provider");
75
+ var import_provider_utils = require("@ai-sdk/provider-utils");
76
// Renders a file part as a URL string: URL instances pass through unchanged,
// raw bytes are inlined as a base64 data URL with the given media type.
function formatFileUrl({ data, mediaType }) {
  if (data instanceof URL) {
    return data.toString();
  }
  return `data:${mediaType};base64,${(0, import_provider_utils.convertToBase64)(data)}`;
}
82
// Converts an AI SDK prompt into the wire format of the Mistral chat API.
// Only image and PDF file parts are supported in user messages. A trailing
// assistant message is sent with `prefix: true` so Mistral continues it
// rather than starting a new turn. Assistant reasoning parts are flattened
// into the plain assistant text.
function convertToMistralChatMessages(prompt) {
  const messages = [];
  for (let index = 0; index < prompt.length; index++) {
    const { role, content } = prompt[index];
    const isLastMessage = index === prompt.length - 1;
    switch (role) {
      case "system":
        messages.push({ role: "system", content });
        break;
      case "user":
        messages.push({
          role: "user",
          content: content.map((part) => {
            switch (part.type) {
              case "text":
                return { type: "text", text: part.text };
              case "file": {
                if (part.mediaType.startsWith("image/")) {
                  // Mistral needs a concrete image type; map the wildcard to JPEG.
                  const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
                  return {
                    type: "image_url",
                    image_url: formatFileUrl({ data: part.data, mediaType })
                  };
                }
                if (part.mediaType === "application/pdf") {
                  return {
                    type: "document_url",
                    document_url: formatFileUrl({
                      data: part.data,
                      mediaType: "application/pdf"
                    })
                  };
                }
                throw new import_provider.UnsupportedFunctionalityError({
                  functionality: "Only images and PDF file parts are supported"
                });
              }
            }
          })
        });
        break;
      case "assistant": {
        let text = "";
        const toolCalls = [];
        for (const part of content) {
          switch (part.type) {
            case "text":
            case "reasoning":
              // Reasoning text is merged into the ordinary assistant text.
              text += part.text;
              break;
            case "tool-call":
              toolCalls.push({
                id: part.toolCallId,
                type: "function",
                function: {
                  name: part.toolName,
                  arguments: JSON.stringify(part.input)
                }
              });
              break;
            default:
              throw new Error(
                `Unsupported content type in assistant message: ${part.type}`
              );
          }
        }
        messages.push({
          role: "assistant",
          content: text,
          prefix: isLastMessage ? true : undefined,
          tool_calls: toolCalls.length > 0 ? toolCalls : undefined
        });
        break;
      }
      case "tool": {
        for (const toolResponse of content) {
          // Approval responses carry no payload for the model.
          if (toolResponse.type === "tool-approval-response") {
            continue;
          }
          const output = toolResponse.output;
          let contentValue;
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value;
              break;
            case "execution-denied":
              contentValue = output.reason != null ? output.reason : "Tool execution denied.";
              break;
            case "content":
            case "json":
            case "error-json":
              contentValue = JSON.stringify(output.value);
              break;
          }
          messages.push({
            role: "tool",
            name: toolResponse.toolName,
            tool_call_id: toolResponse.toolCallId,
            content: contentValue
          });
        }
        break;
      }
      default:
        throw new Error(`Unsupported role: ${role}`);
    }
  }
  return messages;
}
204
+
205
+ // src/get-response-metadata.ts
206
// Normalizes the id/model/created fields of a Mistral response into the
// AI SDK response-metadata shape. `created` is interpreted as epoch seconds.
function getResponseMetadata({ id, model, created }) {
  const metadata = {
    id: undefined,
    modelId: undefined,
    timestamp: undefined
  };
  if (id != null) {
    metadata.id = id;
  }
  if (model != null) {
    metadata.modelId = model;
  }
  if (created != null) {
    metadata.timestamp = new Date(created * 1000);
  }
  return metadata;
}
217
+
218
+ // src/map-mistral-finish-reason.ts
219
// Translates Mistral finish reasons into the AI SDK's unified vocabulary.
// Anything unrecognized (including null/undefined) maps to "other".
function mapMistralFinishReason(finishReason) {
  if (finishReason === "stop") {
    return "stop";
  }
  if (finishReason === "length" || finishReason === "model_length") {
    return "length";
  }
  if (finishReason === "tool_calls") {
    return "tool-calls";
  }
  return "other";
}
232
+
233
+ // src/mistral-chat-options.ts
234
+ var import_v4 = require("zod/v4");
235
// Schema for provider-specific options passed under `providerOptions.mistral`.
var mistralLanguageModelOptions = import_v4.z.object({
  /**
  Whether to inject a safety prompt before all conversations.

  Defaults to `false`.
  */
  safePrompt: import_v4.z.boolean().optional(),
  // Forwarded to the API as `document_image_limit`.
  documentImageLimit: import_v4.z.number().optional(),
  // Forwarded to the API as `document_page_limit`.
  documentPageLimit: import_v4.z.number().optional(),
  /**
   * Whether to use structured outputs (json_schema response format when a
   * schema is provided).
   *
   * @default true
   */
  structuredOutputs: import_v4.z.boolean().optional(),
  /**
   * Whether to use strict JSON schema validation.
   *
   * @default false
   */
  strictJsonSchema: import_v4.z.boolean().optional(),
  /**
   * Whether to enable parallel function calling during tool use.
   * When set to false, the model will use at most one tool per response.
   *
   * @default true
   */
  parallelToolCalls: import_v4.z.boolean().optional()
});
264
+
265
+ // src/mistral-error.ts
266
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
267
+ var import_v42 = require("zod/v4");
268
// Shape of error payloads returned by the Mistral API.
var mistralErrorDataSchema = import_v42.z.object({
  object: import_v42.z.literal("error"),
  message: import_v42.z.string(),
  type: import_v42.z.string(),
  param: import_v42.z.string().nullable(),
  code: import_v42.z.string().nullable()
});
// Parses failed HTTP responses and surfaces the API's `message` field.
var mistralFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
  errorSchema: mistralErrorDataSchema,
  errorToMessage: (data) => data.message
});
279
+
280
+ // src/mistral-prepare-tools.ts
281
+ var import_provider2 = require("@ai-sdk/provider");
282
// Maps AI SDK tools and toolChoice onto Mistral's tool parameters.
// Provider-defined tools are unsupported and produce warnings instead of
// entries. Mistral has no direct "call this specific tool" mode, so a
// `tool` choice is emulated by filtering the tool list down to that tool
// and forcing `tool_choice: "any"`; `required` likewise becomes "any".
function prepareTools({ tools, toolChoice }) {
  const toolWarnings = [];
  if (tools == null || tools.length === 0) {
    return { tools: undefined, toolChoice: undefined, toolWarnings };
  }
  const mistralTools = [];
  for (const tool of tools) {
    if (tool.type === "provider") {
      toolWarnings.push({
        type: "unsupported",
        feature: `provider-defined tool ${tool.id}`
      });
      continue;
    }
    mistralTools.push({
      type: "function",
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.inputSchema,
        // Only include `strict` when the caller set it explicitly.
        ...tool.strict != null ? { strict: tool.strict } : {}
      }
    });
  }
  if (toolChoice == null) {
    return { tools: mistralTools, toolChoice: undefined, toolWarnings };
  }
  switch (toolChoice.type) {
    case "auto":
    case "none":
      return { tools: mistralTools, toolChoice: toolChoice.type, toolWarnings };
    case "required":
      return { tools: mistralTools, toolChoice: "any", toolWarnings };
    case "tool":
      return {
        tools: mistralTools.filter(
          (tool) => tool.function.name === toolChoice.toolName
        ),
        toolChoice: "any",
        toolWarnings
      };
    default:
      throw new import_provider2.UnsupportedFunctionalityError({
        functionality: `tool choice type: ${toolChoice.type}`
      });
  }
}
338
+
339
+ // src/mistral-chat-language-model.ts
340
// Mistral chat completions language model (AI SDK specification v3).
// Intricate, order-dependent streaming state machine — documented in place.
var MistralChatLanguageModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v3";
    // PDF https URLs are passed to the API as-is; other files are inlined.
    this.supportedUrls = {
      "application/pdf": [/^https:\/\/.*$/]
    };
    var _a;
    this.modelId = modelId;
    this.config = config;
    // Used to mint ids for streamed reasoning parts; injectable for tests.
    this.generateId = (_a = config.generateId) != null ? _a : import_provider_utils3.generateId;
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the request body and collects warnings for unsupported settings.
  // Shared by doGenerate and doStream.
  async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences,
    responseFormat,
    seed,
    providerOptions,
    tools,
    toolChoice
  }) {
    var _a, _b, _c, _d;
    const warnings = [];
    // Validated `providerOptions.mistral` values; {} when none were given.
    const options = (_a = await (0, import_provider_utils3.parseProviderOptions)({
      provider: "mistral",
      providerOptions,
      schema: mistralLanguageModelOptions
    })) != null ? _a : {};
    // These sampling settings have no Mistral equivalent — warn, don't fail.
    if (topK != null) {
      warnings.push({ type: "unsupported", feature: "topK" });
    }
    if (frequencyPenalty != null) {
      warnings.push({ type: "unsupported", feature: "frequencyPenalty" });
    }
    if (presencePenalty != null) {
      warnings.push({ type: "unsupported", feature: "presencePenalty" });
    }
    if (stopSequences != null) {
      warnings.push({ type: "unsupported", feature: "stopSequences" });
    }
    const structuredOutputs = (_b = options.structuredOutputs) != null ? _b : true;
    const strictJsonSchema = (_c = options.strictJsonSchema) != null ? _c : false;
    // JSON mode without a schema: fall back to prompt-injected JSON instructions
    // (schema is nullish here, so the instruction is the generic one).
    if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && !(responseFormat == null ? void 0 : responseFormat.schema)) {
      prompt = (0, import_provider_utils3.injectJsonInstructionIntoMessages)({
        messages: prompt,
        schema: responseFormat.schema
      });
    }
    const baseArgs = {
      // model id:
      model: this.modelId,
      // model specific settings:
      safe_prompt: options.safePrompt,
      // standardized settings:
      max_tokens: maxOutputTokens,
      temperature,
      top_p: topP,
      random_seed: seed,
      // response format: json_schema when structured outputs are enabled and a
      // schema exists, json_object for schemaless JSON mode, otherwise omitted.
      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && (responseFormat == null ? void 0 : responseFormat.schema) != null ? {
        type: "json_schema",
        json_schema: {
          schema: responseFormat.schema,
          strict: strictJsonSchema,
          name: (_d = responseFormat.name) != null ? _d : "response",
          description: responseFormat.description
        }
      } : { type: "json_object" } : void 0,
      // mistral-specific provider options:
      document_image_limit: options.documentImageLimit,
      document_page_limit: options.documentPageLimit,
      // messages:
      messages: convertToMistralChatMessages(prompt)
    };
    const {
      tools: mistralTools,
      toolChoice: mistralToolChoice,
      toolWarnings
    } = prepareTools({
      tools,
      toolChoice
    });
    return {
      args: {
        ...baseArgs,
        tools: mistralTools,
        tool_choice: mistralToolChoice,
        // Only send parallel_tool_calls when tools are present and the caller set it.
        ...mistralTools != null && options.parallelToolCalls !== void 0 ? { parallel_tool_calls: options.parallelToolCalls } : {}
      },
      warnings: [...warnings, ...toolWarnings]
    };
  }
  // Non-streaming generation: POST once, then map the single choice into
  // AI SDK content parts (reasoning, text, tool calls).
  async doGenerate(options) {
    var _a;
    const { args: body, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await (0, import_provider_utils3.postJsonToApi)({
      url: `${this.config.baseURL}/chat/completions`,
      headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: mistralFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
        mistralChatResponseSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const choice = response.choices[0];
    const content = [];
    if (choice.message.content != null && Array.isArray(choice.message.content)) {
      // Array content: keep reasoning ("thinking") and text parts, drop empties.
      for (const part of choice.message.content) {
        if (part.type === "thinking") {
          const reasoningText = extractReasoningContent(part.thinking);
          if (reasoningText.length > 0) {
            content.push({ type: "reasoning", text: reasoningText });
          }
        } else if (part.type === "text") {
          if (part.text.length > 0) {
            content.push({ type: "text", text: part.text });
          }
        }
      }
    } else {
      // String (or nullish) content: a single text part when non-empty.
      const text = extractTextContent(choice.message.content);
      if (text != null && text.length > 0) {
        content.push({ type: "text", text });
      }
    }
    if (choice.message.tool_calls != null) {
      for (const toolCall of choice.message.tool_calls) {
        content.push({
          type: "tool-call",
          toolCallId: toolCall.id,
          toolName: toolCall.function.name,
          input: toolCall.function.arguments
        });
      }
    }
    return {
      content,
      finishReason: {
        unified: mapMistralFinishReason(choice.finish_reason),
        raw: (_a = choice.finish_reason) != null ? _a : void 0
      },
      usage: convertMistralUsage(response.usage),
      request: { body },
      response: {
        ...getResponseMetadata(response),
        headers: responseHeaders,
        body: rawResponse
      },
      warnings
    };
  }
  // Streaming generation: pipes the SSE chunk stream through a TransformStream
  // that emits AI SDK stream parts. State across chunks: activeText / 
  // activeReasoningId track the currently open part so start/end events pair up;
  // text always uses id "0", reasoning gets a generated id.
  async doStream(options) {
    const { args, warnings } = await this.getArgs(options);
    const body = { ...args, stream: true };
    const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
      url: `${this.config.baseURL}/chat/completions`,
      headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
      body,
      failedResponseHandler: mistralFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
        mistralChatChunkSchema
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    let finishReason = {
      unified: "other",
      raw: void 0
    };
    let usage = void 0;
    let isFirstChunk = true;
    let activeText = false;
    let activeReasoningId = null;
    // Captured because `this` is not available inside the transform callbacks.
    const generateId2 = this.generateId;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
            // Schema-validation failures surface as error parts, not throws.
            if (!chunk.success) {
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            if (isFirstChunk) {
              isFirstChunk = false;
              controller.enqueue({
                type: "response-metadata",
                ...getResponseMetadata(value)
              });
            }
            // Usage may appear on any chunk; the last one seen wins.
            if (value.usage != null) {
              usage = value.usage;
            }
            const choice = value.choices[0];
            const delta = choice.delta;
            const textContent = extractTextContent(delta.content);
            if (delta.content != null && Array.isArray(delta.content)) {
              for (const part of delta.content) {
                if (part.type === "thinking") {
                  const reasoningDelta = extractReasoningContent(part.thinking);
                  if (reasoningDelta.length > 0) {
                    if (activeReasoningId == null) {
                      // Close an open text part before switching to reasoning.
                      if (activeText) {
                        controller.enqueue({ type: "text-end", id: "0" });
                        activeText = false;
                      }
                      activeReasoningId = generateId2();
                      controller.enqueue({
                        type: "reasoning-start",
                        id: activeReasoningId
                      });
                    }
                    controller.enqueue({
                      type: "reasoning-delta",
                      id: activeReasoningId,
                      delta: reasoningDelta
                    });
                  }
                }
              }
            }
            if (textContent != null && textContent.length > 0) {
              if (!activeText) {
                // Close an open reasoning part before starting text.
                if (activeReasoningId != null) {
                  controller.enqueue({
                    type: "reasoning-end",
                    id: activeReasoningId
                  });
                  activeReasoningId = null;
                }
                controller.enqueue({ type: "text-start", id: "0" });
                activeText = true;
              }
              controller.enqueue({
                type: "text-delta",
                id: "0",
                delta: textContent
              });
            }
            if ((delta == null ? void 0 : delta.tool_calls) != null) {
              // Each streamed tool call arrives complete, so input start/delta/end
              // and the final tool-call are emitted together.
              for (const toolCall of delta.tool_calls) {
                const toolCallId = toolCall.id;
                const toolName = toolCall.function.name;
                const input = toolCall.function.arguments;
                controller.enqueue({
                  type: "tool-input-start",
                  id: toolCallId,
                  toolName
                });
                controller.enqueue({
                  type: "tool-input-delta",
                  id: toolCallId,
                  delta: input
                });
                controller.enqueue({
                  type: "tool-input-end",
                  id: toolCallId
                });
                controller.enqueue({
                  type: "tool-call",
                  toolCallId,
                  toolName,
                  input
                });
              }
            }
            if (choice.finish_reason != null) {
              finishReason = {
                unified: mapMistralFinishReason(choice.finish_reason),
                raw: choice.finish_reason
              };
            }
          },
          flush(controller) {
            // Close any part left open, then emit the final finish event.
            if (activeReasoningId != null) {
              controller.enqueue({
                type: "reasoning-end",
                id: activeReasoningId
              });
            }
            if (activeText) {
              controller.enqueue({ type: "text-end", id: "0" });
            }
            controller.enqueue({
              type: "finish",
              finishReason,
              usage: convertMistralUsage(usage)
            });
          }
        })
      ),
      request: { body },
      response: { headers: responseHeaders }
    };
  }
};
656
// Concatenates the text chunks of a `thinking` content part; chunks of any
// other type are ignored. Returns "" when there is no text.
function extractReasoningContent(thinking) {
  let reasoning = "";
  for (const chunk of thinking) {
    if (chunk.type === "text") {
      reasoning += chunk.text;
    }
  }
  return reasoning;
}
659
// Extracts the plain-text portion of a Mistral message `content` value.
// String content is returned as-is; nullish content yields undefined; array
// content yields the concatenation of its text parts (undefined when there
// are none). Throws on content part types this provider does not know.
function extractTextContent(content) {
  if (content == null) {
    return undefined;
  }
  if (typeof content === "string") {
    return content;
  }
  const parts = [];
  for (const chunk of content) {
    switch (chunk.type) {
      case "text":
        parts.push(chunk.text);
        break;
      case "thinking":
      case "image_url":
      case "reference":
        // Known non-text part types contribute nothing here.
        break;
      default:
        throw new Error(`Unsupported type: ${chunk.type}`);
    }
  }
  return parts.length ? parts.join("") : undefined;
}
685
// Message `content` wire format: either a plain string or an array of typed
// parts (text, image_url, reference, thinking). Nullish content is allowed.
var mistralContentSchema = import_v43.z.union([
  import_v43.z.string(),
  import_v43.z.array(
    import_v43.z.discriminatedUnion("type", [
      import_v43.z.object({
        type: import_v43.z.literal("text"),
        text: import_v43.z.string()
      }),
      import_v43.z.object({
        type: import_v43.z.literal("image_url"),
        image_url: import_v43.z.union([
          import_v43.z.string(),
          import_v43.z.object({
            url: import_v43.z.string(),
            detail: import_v43.z.string().nullable()
          })
        ])
      }),
      import_v43.z.object({
        type: import_v43.z.literal("reference"),
        reference_ids: import_v43.z.array(import_v43.z.union([import_v43.z.string(), import_v43.z.number()]))
      }),
      import_v43.z.object({
        type: import_v43.z.literal("thinking"),
        thinking: import_v43.z.array(
          import_v43.z.object({
            type: import_v43.z.literal("text"),
            text: import_v43.z.string()
          })
        )
      })
    ])
  )
]).nullish();
// Token accounting attached to chat completion responses.
var mistralUsageSchema = import_v43.z.object({
  prompt_tokens: import_v43.z.number(),
  completion_tokens: import_v43.z.number(),
  total_tokens: import_v43.z.number()
});
// Non-streaming chat completion response body.
var mistralChatResponseSchema = import_v43.z.object({
  id: import_v43.z.string().nullish(),
  created: import_v43.z.number().nullish(),
  model: import_v43.z.string().nullish(),
  choices: import_v43.z.array(
    import_v43.z.object({
      message: import_v43.z.object({
        role: import_v43.z.literal("assistant"),
        content: mistralContentSchema,
        tool_calls: import_v43.z.array(
          import_v43.z.object({
            id: import_v43.z.string(),
            function: import_v43.z.object({ name: import_v43.z.string(), arguments: import_v43.z.string() })
          })
        ).nullish()
      }),
      index: import_v43.z.number(),
      finish_reason: import_v43.z.string().nullish()
    })
  ),
  object: import_v43.z.literal("chat.completion"),
  usage: mistralUsageSchema
});
// Streaming (SSE) chat completion chunk; `usage` is only present on some chunks.
var mistralChatChunkSchema = import_v43.z.object({
  id: import_v43.z.string().nullish(),
  created: import_v43.z.number().nullish(),
  model: import_v43.z.string().nullish(),
  choices: import_v43.z.array(
    import_v43.z.object({
      delta: import_v43.z.object({
        role: import_v43.z.enum(["assistant"]).optional(),
        content: mistralContentSchema,
        tool_calls: import_v43.z.array(
          import_v43.z.object({
            id: import_v43.z.string(),
            function: import_v43.z.object({ name: import_v43.z.string(), arguments: import_v43.z.string() })
          })
        ).nullish()
      }),
      finish_reason: import_v43.z.string().nullish(),
      index: import_v43.z.number()
    })
  ),
  usage: mistralUsageSchema.nullish()
});
769
+
770
+ // src/mistral-embedding-model.ts
771
+ var import_provider3 = require("@ai-sdk/provider");
772
+ var import_provider_utils4 = require("@ai-sdk/provider-utils");
773
+ var import_v44 = require("zod/v4");
774
// Mistral embeddings model (AI SDK specification v3).
var MistralEmbeddingModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v3";
    // Client-side cap on values per request — enforced in doEmbed below.
    this.maxEmbeddingsPerCall = 32;
    this.supportsParallelCalls = false;
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Embeds a batch of values in one API call.
  // Throws TooManyEmbeddingValuesForCallError when the batch exceeds the cap.
  async doEmbed({
    values,
    abortSignal,
    headers
  }) {
    if (values.length > this.maxEmbeddingsPerCall) {
      throw new import_provider3.TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values
      });
    }
    const {
      responseHeaders,
      value: response,
      rawValue
    } = await (0, import_provider_utils4.postJsonToApi)({
      url: `${this.config.baseURL}/embeddings`,
      headers: (0, import_provider_utils4.combineHeaders)(this.config.headers(), headers),
      body: {
        model: this.modelId,
        input: values,
        encoding_format: "float"
      },
      failedResponseHandler: mistralFailedResponseHandler,
      successfulResponseHandler: (0, import_provider_utils4.createJsonResponseHandler)(
        MistralTextEmbeddingResponseSchema
      ),
      abortSignal,
      fetch: this.config.fetch
    });
    return {
      warnings: [],
      embeddings: response.data.map((item) => item.embedding),
      // Usage is optional in the response; omit token counts when absent.
      usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
      response: { headers: responseHeaders, body: rawValue }
    };
  }
};
825
// Embeddings endpoint response body; usage may be missing.
var MistralTextEmbeddingResponseSchema = import_v44.z.object({
  data: import_v44.z.array(import_v44.z.object({ embedding: import_v44.z.array(import_v44.z.number()) })),
  usage: import_v44.z.object({ prompt_tokens: import_v44.z.number() }).nullish()
});
829
+
830
+ // src/version.ts
831
// Package version stamped at build time; appended to the user-agent suffix.
// (The original bundler output kept a dead `true ? … : "0.0.0-test"` ternary
// from define substitution; the constant branch is the only reachable value.)
var VERSION = "0.0.0-1c33ba03-20260114162300";
832
+
833
+ // src/mistral-provider.ts
834
// Creates a Mistral provider. `options` may override the API key (defaults
// to the MISTRAL_API_KEY environment variable via loadApiKey), the base URL,
// extra headers, the fetch implementation, and the id generator.
function createMistral(options = {}) {
  const normalizedBaseURL = (0, import_provider_utils5.withoutTrailingSlash)(options.baseURL);
  const baseURL = normalizedBaseURL != null ? normalizedBaseURL : "https://api.mistral.ai/v1";
  // Headers are computed lazily per request so the API key is resolved late.
  const getHeaders = () => (0, import_provider_utils5.withUserAgentSuffix)(
    {
      Authorization: `Bearer ${(0, import_provider_utils5.loadApiKey)({
        apiKey: options.apiKey,
        environmentVariableName: "MISTRAL_API_KEY",
        description: "Mistral"
      })}`,
      ...options.headers
    },
    `ai-sdk/mistral/${VERSION}`
  );
  const createChatModel = (modelId) => new MistralChatLanguageModel(modelId, {
    provider: "mistral.chat",
    baseURL,
    headers: getHeaders,
    fetch: options.fetch,
    generateId: options.generateId
  });
  const createEmbeddingModel = (modelId) => new MistralEmbeddingModel(modelId, {
    provider: "mistral.embedding",
    baseURL,
    headers: getHeaders,
    fetch: options.fetch
  });
  // The provider itself is callable (returns a chat model) but must not be
  // constructed with `new`.
  const provider = function(modelId) {
    if (new.target) {
      throw new Error(
        "The Mistral model function cannot be called with the new keyword."
      );
    }
    return createChatModel(modelId);
  };
  Object.assign(provider, {
    specificationVersion: "v3",
    languageModel: createChatModel,
    chat: createChatModel,
    embedding: createEmbeddingModel,
    embeddingModel: createEmbeddingModel,
    textEmbedding: createEmbeddingModel,
    textEmbeddingModel: createEmbeddingModel,
    // Mistral exposes no image models through this provider.
    imageModel: (modelId) => {
      throw new import_provider4.NoSuchModelError({ modelId, modelType: "imageModel" });
    }
  });
  return provider;
}
881
// Default provider instance; reads the API key from MISTRAL_API_KEY lazily.
var mistral = createMistral();
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  VERSION,
  createMistral,
  mistral
});
//# sourceMappingURL=index.js.map