@ai-sdk/mistral 1.2.7 → 2.0.0-alpha.1

This diff shows the published contents of the two package versions as they appear in their public registries. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -1,4 +1,7 @@
 // src/mistral-provider.ts
+import {
+  NoSuchModelError
+} from "@ai-sdk/provider";
 import {
   loadApiKey,
   withoutTrailingSlash
@@ -9,15 +12,15 @@ import {
   combineHeaders,
   createEventSourceResponseHandler,
   createJsonResponseHandler,
+  parseProviderOptions,
   postJsonToApi
 } from "@ai-sdk/provider-utils";
-import { z as z2 } from "zod";
+import { z as z3 } from "zod";

 // src/convert-to-mistral-chat-messages.ts
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
-import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
 function convertToMistralChatMessages(prompt) {
   const messages = [];
   for (let i = 0; i < prompt.length; i++) {
@@ -32,36 +35,27 @@ function convertToMistralChatMessages(prompt) {
       messages.push({
         role: "user",
         content: content.map((part) => {
-          var _a;
           switch (part.type) {
             case "text": {
               return { type: "text", text: part.text };
             }
-            case "image": {
-              return {
-                type: "image_url",
-                image_url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
-              };
-            }
             case "file": {
-              if (!(part.data instanceof URL)) {
+              if (part.mediaType.startsWith("image/")) {
+                const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                return {
+                  type: "image_url",
+                  image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`
+                };
+              } else if (part.mediaType === "application/pdf") {
+                return {
+                  type: "document_url",
+                  document_url: part.data.toString()
+                };
+              } else {
                 throw new UnsupportedFunctionalityError({
-                  functionality: "File content parts in user messages"
+                  functionality: "Only images and PDF file parts are supported"
                 });
               }
-              switch (part.mimeType) {
-                case "application/pdf": {
-                  return {
-                    type: "document_url",
-                    document_url: part.data.toString()
-                  };
-                }
-                default: {
-                  throw new UnsupportedFunctionalityError({
-                    functionality: "Only PDF files are supported in user messages"
-                  });
-                }
-              }
             }
           }
         })
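
In 2.0.0 the dedicated `image` part type is gone: images and PDFs both arrive as `file` parts carrying a `mediaType`, and the converter branches on that. A minimal sketch of a user message the new converter accepts, assuming the v2 prompt shape (the URL and base64 payload below are placeholders):

    const userMessage = {
      role: "user",
      content: [
        { type: "text", text: "Summarize this document." },
        // PDFs are forwarded as document_url, so the data is expected to be a URL
        {
          type: "file",
          mediaType: "application/pdf",
          data: new URL("https://example.com/paper.pdf")
        },
        // images may be a URL or a base64 string; "image/*" falls back to image/jpeg
        { type: "file", mediaType: "image/png", data: "iVBORw0KGgo..." }
      ]
    };

Anything that is neither an image nor a PDF now raises UnsupportedFunctionalityError.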
@@ -118,6 +112,19 @@ function convertToMistralChatMessages(prompt) {
   return messages;
 }

+// src/get-response-metadata.ts
+function getResponseMetadata({
+  id,
+  model,
+  created
+}) {
+  return {
+    id: id != null ? id : void 0,
+    modelId: model != null ? model : void 0,
+    timestamp: created != null ? new Date(created * 1e3) : void 0
+  };
+}
+
 // src/map-mistral-finish-reason.ts
 function mapMistralFinishReason(finishReason) {
   switch (finishReason) {
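
`getResponseMetadata` is unchanged but hoisted earlier in the bundle. It lifts Mistral's response envelope into the SDK's metadata fields, converting the Unix `created` timestamp from seconds to a millisecond `Date` (hence the `* 1e3`). With made-up values:

    getResponseMetadata({ id: "cmpl-123", model: "mistral-small-latest", created: 1700000000 });
    // → { id: "cmpl-123", modelId: "mistral-small-latest",
    //     timestamp: 2023-11-14T22:13:20.000Z }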
@@ -133,44 +140,46 @@ function mapMistralFinishReason(finishReason) {
   }
 }

+// src/mistral-chat-options.ts
+import { z } from "zod";
+var mistralProviderOptions = z.object({
+  /**
+  Whether to inject a safety prompt before all conversations.
+
+  Defaults to `false`.
+  */
+  safePrompt: z.boolean().optional(),
+  documentImageLimit: z.number().optional(),
+  documentPageLimit: z.number().optional()
+});
+
 // src/mistral-error.ts
 import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
-import { z } from "zod";
-var mistralErrorDataSchema = z.object({
-  object: z.literal("error"),
-  message: z.string(),
-  type: z.string(),
-  param: z.string().nullable(),
-  code: z.string().nullable()
+import { z as z2 } from "zod";
+var mistralErrorDataSchema = z2.object({
+  object: z2.literal("error"),
+  message: z2.string(),
+  type: z2.string(),
+  param: z2.string().nullable(),
+  code: z2.string().nullable()
 });
 var mistralFailedResponseHandler = createJsonErrorResponseHandler({
   errorSchema: mistralErrorDataSchema,
   errorToMessage: (data) => data.message
 });

-// src/get-response-metadata.ts
-function getResponseMetadata({
-  id,
-  model,
-  created
-}) {
-  return {
-    id: id != null ? id : void 0,
-    modelId: model != null ? model : void 0,
-    timestamp: created != null ? new Date(created * 1e3) : void 0
-  };
-}
-
 // src/mistral-prepare-tools.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
-function prepareTools(mode) {
-  var _a;
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+function prepareTools({
+  tools,
+  toolChoice
+}) {
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   if (tools == null) {
-    return { tools: void 0, tool_choice: void 0, toolWarnings };
+    return { tools: void 0, toolChoice: void 0, toolWarnings };
   }
   const mistralTools = [];
   for (const tool of tools) {
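
The options that used to live in the constructor `settings` (`safePrompt`, plus the document limits previously read from `providerMetadata.mistral`) are now per-request provider options validated against this schema. A sketch of how a caller passes them, assuming the AI SDK's `generateText` entry point:

    import { generateText } from "ai";
    import { mistral } from "@ai-sdk/mistral";

    await generateText({
      model: mistral("mistral-small-latest"),
      prompt: "Summarize the attached contract.",
      providerOptions: {
        mistral: { safePrompt: true, documentPageLimit: 8 }
      }
    });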
@@ -187,29 +196,28 @@ function prepareTools(mode) {
       });
     }
   }
-  const toolChoice = mode.toolChoice;
   if (toolChoice == null) {
-    return { tools: mistralTools, tool_choice: void 0, toolWarnings };
+    return { tools: mistralTools, toolChoice: void 0, toolWarnings };
   }
   const type = toolChoice.type;
   switch (type) {
     case "auto":
     case "none":
-      return { tools: mistralTools, tool_choice: type, toolWarnings };
+      return { tools: mistralTools, toolChoice: type, toolWarnings };
     case "required":
-      return { tools: mistralTools, tool_choice: "any", toolWarnings };
+      return { tools: mistralTools, toolChoice: "any", toolWarnings };
     case "tool":
       return {
         tools: mistralTools.filter(
           (tool) => tool.function.name === toolChoice.toolName
         ),
-        tool_choice: "any",
+        toolChoice: "any",
         toolWarnings
       };
     default: {
       const _exhaustiveCheck = type;
       throw new UnsupportedFunctionalityError2({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
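
`prepareTools` now takes `tools` and `toolChoice` directly instead of unpacking a `mode` object, and returns a camelCased `toolChoice` that `getArgs` later maps onto the wire-format `tool_choice`. The Mistral quirk is unchanged: both the standard `required` choice and a specific `tool` choice are expressed as Mistral's `"any"`. For instance (tools assumed already in SDK shape):

    prepareTools({ tools, toolChoice: { type: "required" } }).toolChoice; // "any"
    prepareTools({ tools, toolChoice: { type: "none" } }).toolChoice;     // "none"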
@@ -217,24 +225,20 @@ function prepareTools(mode) {

 // src/mistral-chat-language-model.ts
 var MistralChatLanguageModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
-    this.defaultObjectGenerationMode = "json";
-    this.supportsImageUrls = false;
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.supportedUrls = {
+      "application/pdf": [/^https:\/\/.*$/]
+    };
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  supportsUrl(url) {
-    return url.protocol === "https:";
-  }
-  getArgs({
-    mode,
+  async getArgs({
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
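
Constructor `settings` disappear here, and the imperative `supportsUrl(url)` check becomes the declarative `supportedUrls` map: https PDF URLs are forwarded to the API untouched, while other URLs are presumably fetched by the SDK and passed as data. Checking the regex from the diff:

    const pdfUrlPattern = /^https:\/\/.*$/;
    pdfUrlPattern.test("https://example.com/doc.pdf"); // true  → URL forwarded as-is
    pdfUrlPattern.test("http://example.com/doc.pdf");  // false → content fetched and inlined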
@@ -243,11 +247,17 @@ var MistralChatLanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
-    providerMetadata
+    providerOptions,
+    tools,
+    toolChoice
   }) {
-    var _a, _b;
-    const type = mode.type;
+    var _a;
     const warnings = [];
+    const options = (_a = await parseProviderOptions({
+      provider: "mistral",
+      providerOptions,
+      schema: mistralProviderOptions
+    })) != null ? _a : {};
     if (topK != null) {
       warnings.push({
         type: "unsupported-setting",
@@ -283,56 +293,39 @@ var MistralChatLanguageModel = class {
       // model id:
       model: this.modelId,
       // model specific settings:
-      safe_prompt: this.settings.safePrompt,
+      safe_prompt: options.safePrompt,
       // standardized settings:
-      max_tokens: maxTokens,
+      max_tokens: maxOutputTokens,
       temperature,
       top_p: topP,
       random_seed: seed,
       // response format:
       response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0,
       // mistral-specific provider options:
-      document_image_limit: (_a = providerMetadata == null ? void 0 : providerMetadata.mistral) == null ? void 0 : _a.documentImageLimit,
-      document_page_limit: (_b = providerMetadata == null ? void 0 : providerMetadata.mistral) == null ? void 0 : _b.documentPageLimit,
+      document_image_limit: options.documentImageLimit,
+      document_page_limit: options.documentPageLimit,
       // messages:
       messages: convertToMistralChatMessages(prompt)
     };
-    switch (type) {
-      case "regular": {
-        const { tools, tool_choice, toolWarnings } = prepareTools(mode);
-        return {
-          args: { ...baseArgs, tools, tool_choice },
-          warnings: [...warnings, ...toolWarnings]
-        };
-      }
-      case "object-json": {
-        return {
-          args: {
-            ...baseArgs,
-            response_format: { type: "json_object" }
-          },
-          warnings
-        };
-      }
-      case "object-tool": {
-        return {
-          args: {
-            ...baseArgs,
-            tool_choice: "any",
-            tools: [{ type: "function", function: mode.tool }]
-          },
-          warnings
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
+    const {
+      tools: mistralTools,
+      toolChoice: mistralToolChoice,
+      toolWarnings
+    } = prepareTools({
+      tools,
+      toolChoice
+    });
+    return {
+      args: {
+        ...baseArgs,
+        tools: mistralTools,
+        tool_choice: mistralToolChoice
+      },
+      warnings: [...warnings, ...toolWarnings]
+    };
   }
   async doGenerate(options) {
-    var _a;
-    const { args, warnings } = this.getArgs(options);
+    const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
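
`getArgs` no longer branches on a generation `mode`: JSON output is driven entirely by `responseFormat`, and tools always pass through `prepareTools`. It also becomes `async` because `parseProviderOptions` validates asynchronously, which is why `doGenerate` and `doStream` gain an `await`. A hypothetical direct call showing the replacement for the old `object-json` mode:

    const { args } = await model.getArgs({
      prompt,
      responseFormat: { type: "json" } // → response_format: { type: "json_object" }
    });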
@@ -340,7 +333,7 @@ var MistralChatLanguageModel = class {
     } = await postJsonToApi({
       url: `${this.config.baseURL}/chat/completions`,
       headers: combineHeaders(this.config.headers(), options.headers),
-      body: args,
+      body,
       failedResponseHandler: mistralFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler(
         mistralChatResponseSchema
@@ -348,38 +341,46 @@ var MistralChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const choice = response.choices[0];
+    const content = [];
     let text = extractTextContent(choice.message.content);
-    const lastMessage = rawPrompt[rawPrompt.length - 1];
+    const lastMessage = body.messages[body.messages.length - 1];
     if (lastMessage.role === "assistant" && (text == null ? void 0 : text.startsWith(lastMessage.content))) {
       text = text.slice(lastMessage.content.length);
     }
+    if (text != null && text.length > 0) {
+      content.push({ type: "text", text });
+    }
+    if (choice.message.tool_calls != null) {
+      for (const toolCall of choice.message.tool_calls) {
+        content.push({
+          type: "tool-call",
+          toolCallType: "function",
+          toolCallId: toolCall.id,
+          toolName: toolCall.function.name,
+          args: toolCall.function.arguments
+        });
+      }
+    }
     return {
-      text,
-      toolCalls: (_a = choice.message.tool_calls) == null ? void 0 : _a.map((toolCall) => ({
-        toolCallType: "function",
-        toolCallId: toolCall.id,
-        toolName: toolCall.function.name,
-        args: toolCall.function.arguments
-      })),
+      content,
       finishReason: mapMistralFinishReason(choice.finish_reason),
       usage: {
-        promptTokens: response.usage.prompt_tokens,
-        completionTokens: response.usage.completion_tokens
+        inputTokens: response.usage.prompt_tokens,
+        outputTokens: response.usage.completion_tokens,
+        totalTokens: response.usage.total_tokens
       },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: {
+      request: { body },
+      response: {
+        ...getResponseMetadata(response),
         headers: responseHeaders,
         body: rawResponse
       },
-      request: { body: JSON.stringify(args) },
-      response: getResponseMetadata(response),
      warnings
    };
  }
  async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
    const body = { ...args, stream: true };
    const { responseHeaders, value: response } = await postJsonToApi({
      url: `${this.config.baseURL}/chat/completions`,
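
`doGenerate` now returns a single ordered `content` array of typed parts instead of separate `text` and `toolCalls` fields, and usage gains `totalTokens`. A sketch of consuming the new result shape:

    const result = await model.doGenerate(callOptions); // callOptions assumed
    for (const part of result.content) {
      if (part.type === "text") process.stdout.write(part.text);
      if (part.type === "tool-call") console.log(part.toolName, part.args);
    }
    console.log(result.usage); // { inputTokens, outputTokens, totalTokens }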
@@ -392,17 +393,20 @@ var MistralChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0,
+      totalTokens: void 0
     };
     let chunkNumber = 0;
     let trimLeadingSpace = false;
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
             if (!chunk.success) {
               controller.enqueue({ type: "error", error: chunk.error });
@@ -417,10 +421,9 @@ var MistralChatLanguageModel = class {
               });
             }
             if (value.usage != null) {
-              usage = {
-                promptTokens: value.usage.prompt_tokens,
-                completionTokens: value.usage.completion_tokens
-              };
+              usage.inputTokens = value.usage.prompt_tokens;
+              usage.outputTokens = value.usage.completion_tokens;
+              usage.totalTokens = value.usage.total_tokens;
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -432,7 +435,7 @@ var MistralChatLanguageModel = class {
             const delta = choice.delta;
             const textContent = extractTextContent(delta.content);
             if (chunkNumber <= 2) {
-              const lastMessage = rawPrompt[rawPrompt.length - 1];
+              const lastMessage = body.messages[body.messages.length - 1];
               if (lastMessage.role === "assistant" && textContent === lastMessage.content.trimEnd()) {
                 if (textContent.length < lastMessage.content.length) {
                   trimLeadingSpace = true;
@@ -442,8 +445,8 @@ var MistralChatLanguageModel = class {
             }
             if (textContent != null) {
               controller.enqueue({
-                type: "text-delta",
-                textDelta: trimLeadingSpace ? textContent.trimStart() : textContent
+                type: "text",
+                text: trimLeadingSpace ? textContent.trimStart() : textContent
               });
               trimLeadingSpace = false;
             }
@@ -471,10 +474,8 @@ var MistralChatLanguageModel = class {
           }
         })
       ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      request: { body: JSON.stringify(body) },
-      warnings
+      request: { body },
+      response: { headers: responseHeaders }
     };
   }
 };
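
Streaming moves to the v2 part vocabulary as well: warnings are emitted up front as a `stream-start` part instead of being returned next to the stream, and text deltas become `{ type: "text", text }` rather than `{ type: "text-delta", textDelta }`. A consumption sketch, assuming a Node runtime where a web `ReadableStream` is async-iterable (the `finish` part comes from the flush logic elided above, which this diff leaves unchanged):

    const { stream } = await model.doStream(callOptions);
    for await (const part of stream) {
      switch (part.type) {
        case "stream-start": console.warn(part.warnings); break;
        case "text": process.stdout.write(part.text); break;
        case "finish": console.log(part.finishReason, part.usage); break;
      }
    }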
@@ -503,81 +504,80 @@ function extractTextContent(content) {
   }
   return textContent.length ? textContent.join("") : void 0;
 }
-var mistralContentSchema = z2.union([
-  z2.string(),
-  z2.array(
-    z2.discriminatedUnion("type", [
-      z2.object({
-        type: z2.literal("text"),
-        text: z2.string()
+var mistralContentSchema = z3.union([
+  z3.string(),
+  z3.array(
+    z3.discriminatedUnion("type", [
+      z3.object({
+        type: z3.literal("text"),
+        text: z3.string()
       }),
-      z2.object({
-        type: z2.literal("image_url"),
-        image_url: z2.union([
-          z2.string(),
-          z2.object({
-            url: z2.string(),
-            detail: z2.string().nullable()
+      z3.object({
+        type: z3.literal("image_url"),
+        image_url: z3.union([
+          z3.string(),
+          z3.object({
+            url: z3.string(),
+            detail: z3.string().nullable()
           })
         ])
       }),
-      z2.object({
-        type: z2.literal("reference"),
-        reference_ids: z2.array(z2.number())
+      z3.object({
+        type: z3.literal("reference"),
+        reference_ids: z3.array(z3.number())
       })
     ])
   )
 ]).nullish();
-var mistralChatResponseSchema = z2.object({
-  id: z2.string().nullish(),
-  created: z2.number().nullish(),
-  model: z2.string().nullish(),
-  choices: z2.array(
-    z2.object({
-      message: z2.object({
-        role: z2.literal("assistant"),
+var mistralUsageSchema = z3.object({
+  prompt_tokens: z3.number(),
+  completion_tokens: z3.number(),
+  total_tokens: z3.number()
+});
+var mistralChatResponseSchema = z3.object({
+  id: z3.string().nullish(),
+  created: z3.number().nullish(),
+  model: z3.string().nullish(),
+  choices: z3.array(
+    z3.object({
+      message: z3.object({
+        role: z3.literal("assistant"),
         content: mistralContentSchema,
-        tool_calls: z2.array(
-          z2.object({
-            id: z2.string(),
-            function: z2.object({ name: z2.string(), arguments: z2.string() })
+        tool_calls: z3.array(
+          z3.object({
+            id: z3.string(),
+            function: z3.object({ name: z3.string(), arguments: z3.string() })
           })
         ).nullish()
       }),
-      index: z2.number(),
-      finish_reason: z2.string().nullish()
+      index: z3.number(),
+      finish_reason: z3.string().nullish()
     })
   ),
-  object: z2.literal("chat.completion"),
-  usage: z2.object({
-    prompt_tokens: z2.number(),
-    completion_tokens: z2.number()
-  })
+  object: z3.literal("chat.completion"),
+  usage: mistralUsageSchema
 });
-var mistralChatChunkSchema = z2.object({
-  id: z2.string().nullish(),
-  created: z2.number().nullish(),
-  model: z2.string().nullish(),
-  choices: z2.array(
-    z2.object({
-      delta: z2.object({
-        role: z2.enum(["assistant"]).optional(),
+var mistralChatChunkSchema = z3.object({
+  id: z3.string().nullish(),
+  created: z3.number().nullish(),
+  model: z3.string().nullish(),
+  choices: z3.array(
+    z3.object({
+      delta: z3.object({
+        role: z3.enum(["assistant"]).optional(),
         content: mistralContentSchema,
-        tool_calls: z2.array(
-          z2.object({
-            id: z2.string(),
-            function: z2.object({ name: z2.string(), arguments: z2.string() })
+        tool_calls: z3.array(
+          z3.object({
+            id: z3.string(),
+            function: z3.object({ name: z3.string(), arguments: z3.string() })
          })
        ).nullish()
      }),
-      finish_reason: z2.string().nullish(),
-      index: z2.number()
+      finish_reason: z3.string().nullish(),
+      index: z3.number()
    })
  ),
-  usage: z2.object({
-    prompt_tokens: z2.number(),
-    completion_tokens: z2.number()
-  }).nullish()
+  usage: mistralUsageSchema.nullish()
 });

 // src/mistral-embedding-model.ts
@@ -589,25 +589,18 @@ import {
   createJsonResponseHandler as createJsonResponseHandler2,
   postJsonToApi as postJsonToApi2
 } from "@ai-sdk/provider-utils";
-import { z as z3 } from "zod";
+import { z as z4 } from "zod";
 var MistralEmbeddingModel = class {
-  constructor(modelId, settings, config) {
-    this.specificationVersion = "v1";
+  constructor(modelId, config) {
+    this.specificationVersion = "v2";
+    this.maxEmbeddingsPerCall = 32;
+    this.supportsParallelCalls = false;
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  get maxEmbeddingsPerCall() {
-    var _a;
-    return (_a = this.settings.maxEmbeddingsPerCall) != null ? _a : 32;
-  }
-  get supportsParallelCalls() {
-    var _a;
-    return (_a = this.settings.supportsParallelCalls) != null ? _a : false;
-  }
   async doEmbed({
     values,
     abortSignal,
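
The embedding model's limits become fixed class fields instead of user-tunable settings: always 32 values per call, no parallel calls. Callers that previously raised `maxEmbeddingsPerCall` must chunk their batches; the guard elided just below this hunk still throws `TooManyEmbeddingValuesForCallError` past the limit.

    const model = new MistralEmbeddingModel("mistral-embed", config); // config as elsewhere
    model.maxEmbeddingsPerCall; // 32 (fixed; split larger batches yourself)
    model.supportsParallelCalls; // false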
@@ -621,7 +614,11 @@ var MistralEmbeddingModel = class {
         values
       });
     }
-    const { responseHeaders, value: response } = await postJsonToApi2({
+    const {
+      responseHeaders,
+      value: response,
+      rawValue
+    } = await postJsonToApi2({
       url: `${this.config.baseURL}/embeddings`,
       headers: combineHeaders2(this.config.headers(), headers),
       body: {
@@ -639,13 +636,13 @@ var MistralEmbeddingModel = class {
     return {
       embeddings: response.data.map((item) => item.embedding),
       usage: response.usage ? { tokens: response.usage.prompt_tokens } : void 0,
-      rawResponse: { headers: responseHeaders }
+      response: { headers: responseHeaders, body: rawValue }
     };
   }
 };
-var MistralTextEmbeddingResponseSchema = z3.object({
-  data: z3.array(z3.object({ embedding: z3.array(z3.number()) })),
-  usage: z3.object({ prompt_tokens: z3.number() }).nullish()
+var MistralTextEmbeddingResponseSchema = z4.object({
+  data: z4.array(z4.object({ embedding: z4.array(z4.number()) })),
+  usage: z4.object({ prompt_tokens: z4.number() }).nullish()
 });

 // src/mistral-provider.ts
@@ -660,31 +657,34 @@ function createMistral(options = {}) {
     })}`,
     ...options.headers
   });
-  const createChatModel = (modelId, settings = {}) => new MistralChatLanguageModel(modelId, settings, {
+  const createChatModel = (modelId) => new MistralChatLanguageModel(modelId, {
     provider: "mistral.chat",
     baseURL,
     headers: getHeaders,
     fetch: options.fetch
   });
-  const createEmbeddingModel = (modelId, settings = {}) => new MistralEmbeddingModel(modelId, settings, {
+  const createEmbeddingModel = (modelId) => new MistralEmbeddingModel(modelId, {
     provider: "mistral.embedding",
     baseURL,
     headers: getHeaders,
     fetch: options.fetch
   });
-  const provider = function(modelId, settings) {
+  const provider = function(modelId) {
     if (new.target) {
       throw new Error(
         "The Mistral model function cannot be called with the new keyword."
       );
     }
-    return createChatModel(modelId, settings);
+    return createChatModel(modelId);
   };
   provider.languageModel = createChatModel;
   provider.chat = createChatModel;
   provider.embedding = createEmbeddingModel;
   provider.textEmbedding = createEmbeddingModel;
   provider.textEmbeddingModel = createEmbeddingModel;
+  provider.imageModel = (modelId) => {
+    throw new NoSuchModelError({ modelId, modelType: "imageModel" });
+  };
   return provider;
 }
 var mistral = createMistral();
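
At the provider level, the per-model `settings` argument is dropped everywhere, and an `imageModel` stub is added, presumably so the provider satisfies the full v2 provider interface. A usage sketch (API key via the factory's `apiKey` option):

    import { createMistral } from "@ai-sdk/mistral";

    const mistral = createMistral({ apiKey: process.env.MISTRAL_API_KEY });
    const chat = mistral("mistral-small-latest"); // no second settings argument anymore
    const embedder = mistral.textEmbeddingModel("mistral-embed");
    mistral.imageModel("any-id"); // throws NoSuchModelError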