@mastra/core 1.0.0-beta.6 → 1.0.0-beta.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (203) hide show
  1. package/CHANGELOG.md +195 -0
  2. package/dist/agent/agent-legacy.d.ts.map +1 -1
  3. package/dist/agent/agent.d.ts.map +1 -1
  4. package/dist/agent/index.cjs +9 -9
  5. package/dist/agent/index.js +2 -2
  6. package/dist/agent/message-list/index.cjs +3 -3
  7. package/dist/agent/message-list/index.js +1 -1
  8. package/dist/agent/workflows/prepare-stream/map-results-step.d.ts.map +1 -1
  9. package/dist/agent/workflows/prepare-stream/prepare-memory-step.d.ts.map +1 -1
  10. package/dist/agent/workflows/prepare-stream/schema.d.ts +2 -1
  11. package/dist/agent/workflows/prepare-stream/schema.d.ts.map +1 -1
  12. package/dist/{chunk-NHNXIYDG.cjs → chunk-3EYBRHB2.cjs} +2 -2
  13. package/dist/chunk-3EYBRHB2.cjs.map +1 -0
  14. package/dist/{chunk-AQAIS7FH.cjs → chunk-3JA6VPNM.cjs} +2 -2
  15. package/dist/chunk-3JA6VPNM.cjs.map +1 -0
  16. package/dist/{chunk-WDRA4WKG.js → chunk-4GTU7MB2.js} +5 -5
  17. package/dist/{chunk-WDRA4WKG.js.map → chunk-4GTU7MB2.js.map} +1 -1
  18. package/dist/{chunk-S5FPOCQF.cjs → chunk-6BYHQ52F.cjs} +24 -24
  19. package/dist/{chunk-S5FPOCQF.cjs.map → chunk-6BYHQ52F.cjs.map} +1 -1
  20. package/dist/{chunk-RTROJFEU.cjs → chunk-6C2PU6J4.cjs} +8 -8
  21. package/dist/{chunk-RTROJFEU.cjs.map → chunk-6C2PU6J4.cjs.map} +1 -1
  22. package/dist/{chunk-6GOLI2EY.js → chunk-7PXCGGMO.js} +8 -12
  23. package/dist/chunk-7PXCGGMO.js.map +1 -0
  24. package/dist/chunk-AZQMPE7G.cjs +4981 -0
  25. package/dist/chunk-AZQMPE7G.cjs.map +1 -0
  26. package/dist/{chunk-HNHZGFZY.cjs → chunk-CZEJQSWB.cjs} +10 -2
  27. package/dist/chunk-CZEJQSWB.cjs.map +1 -0
  28. package/dist/{chunk-APMPOYPI.js → chunk-DMPHPHMU.js} +3 -3
  29. package/dist/{chunk-APMPOYPI.js.map → chunk-DMPHPHMU.js.map} +1 -1
  30. package/dist/{chunk-45NA5ZD3.js → chunk-EDNHZZUP.js} +160 -60
  31. package/dist/chunk-EDNHZZUP.js.map +1 -0
  32. package/dist/{chunk-LEZIKVRQ.js → chunk-F54IK3GJ.js} +2 -2
  33. package/dist/chunk-F54IK3GJ.js.map +1 -0
  34. package/dist/{chunk-FYP3AT6W.js → chunk-FNSFXWDN.js} +2 -2
  35. package/dist/chunk-FNSFXWDN.js.map +1 -0
  36. package/dist/{chunk-XY337TTF.js → chunk-GW7BNMGA.js} +4 -4
  37. package/dist/{chunk-XY337TTF.js.map → chunk-GW7BNMGA.js.map} +1 -1
  38. package/dist/{chunk-OPALPWAM.js → chunk-HDMH5IDV.js} +42 -5
  39. package/dist/chunk-HDMH5IDV.js.map +1 -0
  40. package/dist/{chunk-NN3NOFRU.cjs → chunk-HP6TRJ2Q.cjs} +207 -106
  41. package/dist/chunk-HP6TRJ2Q.cjs.map +1 -0
  42. package/dist/{chunk-3TKNXJES.cjs → chunk-KBXFAF37.cjs} +46 -6
  43. package/dist/chunk-KBXFAF37.cjs.map +1 -0
  44. package/dist/{chunk-7ZADRRDW.js → chunk-PK2A5WBG.js} +3 -3
  45. package/dist/chunk-PK2A5WBG.js.map +1 -0
  46. package/dist/{chunk-DXMSVGJ6.js → chunk-QDIYYAA4.js} +28 -59
  47. package/dist/chunk-QDIYYAA4.js.map +1 -0
  48. package/dist/{chunk-TARWUQG6.js → chunk-QHJ7YVNX.js} +8 -8
  49. package/dist/{chunk-TARWUQG6.js.map → chunk-QHJ7YVNX.js.map} +1 -1
  50. package/dist/{chunk-IWTMMUQY.cjs → chunk-QYNZAPFE.cjs} +4 -4
  51. package/dist/{chunk-IWTMMUQY.cjs.map → chunk-QYNZAPFE.cjs.map} +1 -1
  52. package/dist/{chunk-TS7B26US.js → chunk-RWT3XPKO.js} +3 -3
  53. package/dist/{chunk-TS7B26US.js.map → chunk-RWT3XPKO.js.map} +1 -1
  54. package/dist/{chunk-LDWBLENZ.cjs → chunk-SD2BDUJ6.cjs} +28 -32
  55. package/dist/chunk-SD2BDUJ6.cjs.map +1 -0
  56. package/dist/{chunk-NIUEWICQ.js → chunk-SPIUKQDK.js} +8 -8
  57. package/dist/{chunk-NIUEWICQ.js.map → chunk-SPIUKQDK.js.map} +1 -1
  58. package/dist/chunk-TLLXRG2Z.js +4959 -0
  59. package/dist/chunk-TLLXRG2Z.js.map +1 -0
  60. package/dist/{chunk-SVLMF4UZ.cjs → chunk-TQB2HMEC.cjs} +45 -44
  61. package/dist/chunk-TQB2HMEC.cjs.map +1 -0
  62. package/dist/{chunk-KZLP6TPJ.js → chunk-URBPEE67.js} +4 -4
  63. package/dist/{chunk-KZLP6TPJ.js.map → chunk-URBPEE67.js.map} +1 -1
  64. package/dist/{chunk-6J4NRNT2.cjs → chunk-VWC5AUOQ.cjs} +29 -60
  65. package/dist/chunk-VWC5AUOQ.cjs.map +1 -0
  66. package/dist/{chunk-4KUOSXJ7.cjs → chunk-Y5SFNZUK.cjs} +11 -11
  67. package/dist/{chunk-4KUOSXJ7.cjs.map → chunk-Y5SFNZUK.cjs.map} +1 -1
  68. package/dist/{chunk-OIMB2SNZ.cjs → chunk-Y7YCQDML.cjs} +14 -14
  69. package/dist/{chunk-OIMB2SNZ.cjs.map → chunk-Y7YCQDML.cjs.map} +1 -1
  70. package/dist/{chunk-VRFSEZBA.cjs → chunk-YBEW5YWC.cjs} +6 -6
  71. package/dist/{chunk-VRFSEZBA.cjs.map → chunk-YBEW5YWC.cjs.map} +1 -1
  72. package/dist/{chunk-SQAX4OW6.cjs → chunk-YUXTDKYN.cjs} +15 -15
  73. package/dist/{chunk-SQAX4OW6.cjs.map → chunk-YUXTDKYN.cjs.map} +1 -1
  74. package/dist/{chunk-RHTV5C5D.cjs → chunk-YWMMBIOM.cjs} +9 -9
  75. package/dist/{chunk-RHTV5C5D.cjs.map → chunk-YWMMBIOM.cjs.map} +1 -1
  76. package/dist/{chunk-WMAMOYRR.js → chunk-Z57R5WS4.js} +4 -4
  77. package/dist/{chunk-WMAMOYRR.js.map → chunk-Z57R5WS4.js.map} +1 -1
  78. package/dist/{chunk-QM5SRDJX.js → chunk-ZUWJCGLM.js} +5 -4
  79. package/dist/chunk-ZUWJCGLM.js.map +1 -0
  80. package/dist/evals/index.cjs +9 -9
  81. package/dist/evals/index.js +2 -2
  82. package/dist/evals/scoreTraces/index.cjs +5 -5
  83. package/dist/evals/scoreTraces/index.js +2 -2
  84. package/dist/evals/types.d.ts +19 -18
  85. package/dist/evals/types.d.ts.map +1 -1
  86. package/dist/index.cjs +2 -2
  87. package/dist/index.js +1 -1
  88. package/dist/llm/index.cjs +15 -15
  89. package/dist/llm/index.js +5 -5
  90. package/dist/llm/model/gateways/constants.d.ts.map +1 -1
  91. package/dist/llm/model/gateways/models-dev.d.ts.map +1 -1
  92. package/dist/llm/model/model.loop.d.ts +1 -1
  93. package/dist/llm/model/model.loop.d.ts.map +1 -1
  94. package/dist/llm/model/provider-options.d.ts +4 -1
  95. package/dist/llm/model/provider-options.d.ts.map +1 -1
  96. package/dist/llm/model/provider-types.generated.d.ts +12 -22
  97. package/dist/loop/index.cjs +2 -2
  98. package/dist/loop/index.js +1 -1
  99. package/dist/loop/workflows/agentic-execution/llm-execution-step.d.ts.map +1 -1
  100. package/dist/loop/workflows/stream.d.ts.map +1 -1
  101. package/dist/mastra/index.cjs +2 -2
  102. package/dist/mastra/index.js +1 -1
  103. package/dist/memory/index.cjs +6 -6
  104. package/dist/memory/index.js +1 -1
  105. package/dist/models-dev-6GD3644V.js +3 -0
  106. package/dist/{models-dev-F6MTIYTO.js.map → models-dev-6GD3644V.js.map} +1 -1
  107. package/dist/models-dev-IUQBXJSS.cjs +12 -0
  108. package/dist/{models-dev-XIVR5EJV.cjs.map → models-dev-IUQBXJSS.cjs.map} +1 -1
  109. package/dist/netlify-GXJ5D5DD.js +3 -0
  110. package/dist/{netlify-C2YBIK7A.js.map → netlify-GXJ5D5DD.js.map} +1 -1
  111. package/dist/netlify-KJLY3GFS.cjs +12 -0
  112. package/dist/{netlify-XWROOTP4.cjs.map → netlify-KJLY3GFS.cjs.map} +1 -1
  113. package/dist/observability/index.cjs +10 -10
  114. package/dist/observability/index.js +1 -1
  115. package/dist/observability/types/tracing.d.ts +18 -0
  116. package/dist/observability/types/tracing.d.ts.map +1 -1
  117. package/dist/processors/index.cjs +17 -17
  118. package/dist/processors/index.js +1 -1
  119. package/dist/provider-registry-A5QZFI2X.js +3 -0
  120. package/dist/{provider-registry-GRYJWVEM.js.map → provider-registry-A5QZFI2X.js.map} +1 -1
  121. package/dist/provider-registry-V32PGH6F.cjs +40 -0
  122. package/dist/{provider-registry-BXX7CA3H.cjs.map → provider-registry-V32PGH6F.cjs.map} +1 -1
  123. package/dist/provider-registry.json +24 -55
  124. package/dist/relevance/index.cjs +2 -2
  125. package/dist/relevance/index.js +1 -1
  126. package/dist/storage/base.d.ts +22 -1
  127. package/dist/storage/base.d.ts.map +1 -1
  128. package/dist/storage/domains/scores/base.d.ts +2 -2
  129. package/dist/storage/domains/scores/base.d.ts.map +1 -1
  130. package/dist/storage/domains/scores/inmemory.d.ts +2 -2
  131. package/dist/storage/domains/scores/inmemory.d.ts.map +1 -1
  132. package/dist/storage/index.cjs +43 -31
  133. package/dist/storage/index.js +1 -1
  134. package/dist/storage/mock.d.ts +2 -2
  135. package/dist/storage/mock.d.ts.map +1 -1
  136. package/dist/storage/storageWithInit.d.ts.map +1 -1
  137. package/dist/storage/utils.d.ts +38 -0
  138. package/dist/storage/utils.d.ts.map +1 -1
  139. package/dist/stream/aisdk/v5/execute.d.ts +3 -2
  140. package/dist/stream/aisdk/v5/execute.d.ts.map +1 -1
  141. package/dist/stream/aisdk/v5/input.d.ts +4 -1
  142. package/dist/stream/aisdk/v5/input.d.ts.map +1 -1
  143. package/dist/stream/index.cjs +11 -11
  144. package/dist/stream/index.js +2 -2
  145. package/dist/test-utils/llm-mock.cjs +13 -13
  146. package/dist/test-utils/llm-mock.cjs.map +1 -1
  147. package/dist/test-utils/llm-mock.js +6 -6
  148. package/dist/test-utils/llm-mock.js.map +1 -1
  149. package/dist/tools/index.cjs +2 -2
  150. package/dist/tools/index.js +1 -1
  151. package/dist/tools/stream.d.ts +17 -1
  152. package/dist/tools/stream.d.ts.map +1 -1
  153. package/dist/tools/types.d.ts +2 -2
  154. package/dist/tools/types.d.ts.map +1 -1
  155. package/dist/utils.cjs +22 -22
  156. package/dist/utils.js +1 -1
  157. package/dist/vector/index.cjs +2 -2
  158. package/dist/vector/index.js +1 -1
  159. package/dist/workflows/default.d.ts +2 -2
  160. package/dist/workflows/default.d.ts.map +1 -1
  161. package/dist/workflows/evented/index.cjs +10 -10
  162. package/dist/workflows/evented/index.js +1 -1
  163. package/dist/workflows/evented/step-executor.d.ts.map +1 -1
  164. package/dist/workflows/handlers/control-flow.d.ts.map +1 -1
  165. package/dist/workflows/handlers/step.d.ts.map +1 -1
  166. package/dist/workflows/index.cjs +24 -20
  167. package/dist/workflows/index.js +1 -1
  168. package/dist/workflows/step.d.ts +5 -5
  169. package/dist/workflows/step.d.ts.map +1 -1
  170. package/dist/workflows/types.d.ts +1 -0
  171. package/dist/workflows/types.d.ts.map +1 -1
  172. package/dist/workflows/utils.d.ts +10 -1
  173. package/dist/workflows/utils.d.ts.map +1 -1
  174. package/dist/workflows/workflow.d.ts +1 -1
  175. package/dist/workflows/workflow.d.ts.map +1 -1
  176. package/package.json +5 -3
  177. package/src/llm/model/provider-types.generated.d.ts +12 -22
  178. package/dist/chunk-3TKNXJES.cjs.map +0 -1
  179. package/dist/chunk-45NA5ZD3.js.map +0 -1
  180. package/dist/chunk-6GOLI2EY.js.map +0 -1
  181. package/dist/chunk-6J4NRNT2.cjs.map +0 -1
  182. package/dist/chunk-7ZADRRDW.js.map +0 -1
  183. package/dist/chunk-AQAIS7FH.cjs.map +0 -1
  184. package/dist/chunk-DXMSVGJ6.js.map +0 -1
  185. package/dist/chunk-FYP3AT6W.js.map +0 -1
  186. package/dist/chunk-HNHZGFZY.cjs.map +0 -1
  187. package/dist/chunk-LDWBLENZ.cjs.map +0 -1
  188. package/dist/chunk-LEZIKVRQ.js.map +0 -1
  189. package/dist/chunk-NHNXIYDG.cjs.map +0 -1
  190. package/dist/chunk-NN3NOFRU.cjs.map +0 -1
  191. package/dist/chunk-OPALPWAM.js.map +0 -1
  192. package/dist/chunk-QGWNF2QJ.cjs +0 -1697
  193. package/dist/chunk-QGWNF2QJ.cjs.map +0 -1
  194. package/dist/chunk-QM5SRDJX.js.map +0 -1
  195. package/dist/chunk-SVLMF4UZ.cjs.map +0 -1
  196. package/dist/chunk-T2UNO766.js +0 -1694
  197. package/dist/chunk-T2UNO766.js.map +0 -1
  198. package/dist/models-dev-F6MTIYTO.js +0 -3
  199. package/dist/models-dev-XIVR5EJV.cjs +0 -12
  200. package/dist/netlify-C2YBIK7A.js +0 -3
  201. package/dist/netlify-XWROOTP4.cjs +0 -12
  202. package/dist/provider-registry-BXX7CA3H.cjs +0 -40
  203. package/dist/provider-registry-GRYJWVEM.js +0 -3
@@ -1,1694 +0,0 @@
1
- import { createJsonErrorResponseHandler, withoutTrailingSlash, generateId, withUserAgentSuffix, parseProviderOptions, injectJsonInstructionIntoMessages, postJsonToApi, createJsonResponseHandler, combineHeaders, createEventSourceResponseHandler, loadApiKey, convertToBase64, UnsupportedFunctionalityError, NoSuchModelError, MastraModelGateway, createOpenAICompatible, createAnthropic, createGoogleGenerativeAI, createOpenAI, TooManyEmbeddingValuesForCallError, OpenAICompatibleImageModel } from './chunk-7ZADRRDW.js';
2
- import { z } from 'zod/v4';
3
- import { createOpenRouter } from '@openrouter/ai-sdk-provider-v5';
4
-
5
- function convertToMistralChatMessages(prompt) {
6
- const messages = [];
7
- for (let i = 0; i < prompt.length; i++) {
8
- const { role, content } = prompt[i];
9
- const isLastMessage = i === prompt.length - 1;
10
- switch (role) {
11
- case "system": {
12
- messages.push({ role: "system", content });
13
- break;
14
- }
15
- case "user": {
16
- messages.push({
17
- role: "user",
18
- content: content.map((part) => {
19
- switch (part.type) {
20
- case "text": {
21
- return { type: "text", text: part.text };
22
- }
23
- case "file": {
24
- if (part.mediaType.startsWith("image/")) {
25
- const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
26
- return {
27
- type: "image_url",
28
- image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`
29
- };
30
- } else if (part.mediaType === "application/pdf") {
31
- return {
32
- type: "document_url",
33
- document_url: part.data.toString()
34
- };
35
- } else {
36
- throw new UnsupportedFunctionalityError({
37
- functionality: "Only images and PDF file parts are supported"
38
- });
39
- }
40
- }
41
- }
42
- })
43
- });
44
- break;
45
- }
46
- case "assistant": {
47
- let text = "";
48
- const toolCalls = [];
49
- for (const part of content) {
50
- switch (part.type) {
51
- case "text": {
52
- text += part.text;
53
- break;
54
- }
55
- case "tool-call": {
56
- toolCalls.push({
57
- id: part.toolCallId,
58
- type: "function",
59
- function: {
60
- name: part.toolName,
61
- arguments: JSON.stringify(part.input)
62
- }
63
- });
64
- break;
65
- }
66
- case "reasoning": {
67
- text += part.text;
68
- break;
69
- }
70
- default: {
71
- throw new Error(
72
- `Unsupported content type in assistant message: ${part.type}`
73
- );
74
- }
75
- }
76
- }
77
- messages.push({
78
- role: "assistant",
79
- content: text,
80
- prefix: isLastMessage ? true : void 0,
81
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
82
- });
83
- break;
84
- }
85
- case "tool": {
86
- for (const toolResponse of content) {
87
- const output = toolResponse.output;
88
- let contentValue;
89
- switch (output.type) {
90
- case "text":
91
- case "error-text":
92
- contentValue = output.value;
93
- break;
94
- case "content":
95
- case "json":
96
- case "error-json":
97
- contentValue = JSON.stringify(output.value);
98
- break;
99
- }
100
- messages.push({
101
- role: "tool",
102
- name: toolResponse.toolName,
103
- tool_call_id: toolResponse.toolCallId,
104
- content: contentValue
105
- });
106
- }
107
- break;
108
- }
109
- default: {
110
- const _exhaustiveCheck = role;
111
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
112
- }
113
- }
114
- }
115
- return messages;
116
- }
117
- function getResponseMetadata({
118
- id,
119
- model,
120
- created
121
- }) {
122
- return {
123
- id: id != null ? id : void 0,
124
- modelId: model != null ? model : void 0,
125
- timestamp: created != null ? new Date(created * 1e3) : void 0
126
- };
127
- }
128
- function mapMistralFinishReason(finishReason) {
129
- switch (finishReason) {
130
- case "stop":
131
- return "stop";
132
- case "length":
133
- case "model_length":
134
- return "length";
135
- case "tool_calls":
136
- return "tool-calls";
137
- default:
138
- return "unknown";
139
- }
140
- }
141
- var mistralLanguageModelOptions = z.object({
142
- /**
143
- Whether to inject a safety prompt before all conversations.
144
-
145
- Defaults to `false`.
146
- */
147
- safePrompt: z.boolean().optional(),
148
- documentImageLimit: z.number().optional(),
149
- documentPageLimit: z.number().optional(),
150
- /**
151
- * Whether to use structured outputs.
152
- *
153
- * @default true
154
- */
155
- structuredOutputs: z.boolean().optional(),
156
- /**
157
- * Whether to use strict JSON schema validation.
158
- *
159
- * @default false
160
- */
161
- strictJsonSchema: z.boolean().optional(),
162
- /**
163
- * Whether to enable parallel function calling during tool use.
164
- * When set to false, the model will use at most one tool per response.
165
- *
166
- * @default true
167
- */
168
- parallelToolCalls: z.boolean().optional()
169
- });
170
- var mistralErrorDataSchema = z.object({
171
- object: z.literal("error"),
172
- message: z.string(),
173
- type: z.string(),
174
- param: z.string().nullable(),
175
- code: z.string().nullable()
176
- });
177
- var mistralFailedResponseHandler = createJsonErrorResponseHandler({
178
- errorSchema: mistralErrorDataSchema,
179
- errorToMessage: (data) => data.message
180
- });
181
- function prepareTools({
182
- tools,
183
- toolChoice
184
- }) {
185
- tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
186
- const toolWarnings = [];
187
- if (tools == null) {
188
- return { tools: void 0, toolChoice: void 0, toolWarnings };
189
- }
190
- const mistralTools = [];
191
- for (const tool of tools) {
192
- if (tool.type === "provider-defined") {
193
- toolWarnings.push({ type: "unsupported-tool", tool });
194
- } else {
195
- mistralTools.push({
196
- type: "function",
197
- function: {
198
- name: tool.name,
199
- description: tool.description,
200
- parameters: tool.inputSchema
201
- }
202
- });
203
- }
204
- }
205
- if (toolChoice == null) {
206
- return { tools: mistralTools, toolChoice: void 0, toolWarnings };
207
- }
208
- const type = toolChoice.type;
209
- switch (type) {
210
- case "auto":
211
- case "none":
212
- return { tools: mistralTools, toolChoice: type, toolWarnings };
213
- case "required":
214
- return { tools: mistralTools, toolChoice: "any", toolWarnings };
215
- // mistral does not support tool mode directly,
216
- // so we filter the tools and force the tool choice through 'any'
217
- case "tool":
218
- return {
219
- tools: mistralTools.filter(
220
- (tool) => tool.function.name === toolChoice.toolName
221
- ),
222
- toolChoice: "any",
223
- toolWarnings
224
- };
225
- default: {
226
- const _exhaustiveCheck = type;
227
- throw new UnsupportedFunctionalityError({
228
- functionality: `tool choice type: ${_exhaustiveCheck}`
229
- });
230
- }
231
- }
232
- }
233
- var MistralChatLanguageModel = class {
234
- constructor(modelId, config) {
235
- this.specificationVersion = "v2";
236
- this.supportedUrls = {
237
- "application/pdf": [/^https:\/\/.*$/]
238
- };
239
- var _a;
240
- this.modelId = modelId;
241
- this.config = config;
242
- this.generateId = (_a = config.generateId) != null ? _a : generateId;
243
- }
244
- get provider() {
245
- return this.config.provider;
246
- }
247
- async getArgs({
248
- prompt,
249
- maxOutputTokens,
250
- temperature,
251
- topP,
252
- topK,
253
- frequencyPenalty,
254
- presencePenalty,
255
- stopSequences,
256
- responseFormat,
257
- seed,
258
- providerOptions,
259
- tools,
260
- toolChoice
261
- }) {
262
- var _a, _b, _c, _d;
263
- const warnings = [];
264
- const options = (_a = await parseProviderOptions({
265
- provider: "mistral",
266
- providerOptions,
267
- schema: mistralLanguageModelOptions
268
- })) != null ? _a : {};
269
- if (topK != null) {
270
- warnings.push({
271
- type: "unsupported-setting",
272
- setting: "topK"
273
- });
274
- }
275
- if (frequencyPenalty != null) {
276
- warnings.push({
277
- type: "unsupported-setting",
278
- setting: "frequencyPenalty"
279
- });
280
- }
281
- if (presencePenalty != null) {
282
- warnings.push({
283
- type: "unsupported-setting",
284
- setting: "presencePenalty"
285
- });
286
- }
287
- if (stopSequences != null) {
288
- warnings.push({
289
- type: "unsupported-setting",
290
- setting: "stopSequences"
291
- });
292
- }
293
- const structuredOutputs = (_b = options.structuredOutputs) != null ? _b : true;
294
- const strictJsonSchema = (_c = options.strictJsonSchema) != null ? _c : false;
295
- if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && !(responseFormat == null ? void 0 : responseFormat.schema)) {
296
- prompt = injectJsonInstructionIntoMessages({
297
- messages: prompt,
298
- schema: responseFormat.schema
299
- });
300
- }
301
- const baseArgs = {
302
- // model id:
303
- model: this.modelId,
304
- // model specific settings:
305
- safe_prompt: options.safePrompt,
306
- // standardized settings:
307
- max_tokens: maxOutputTokens,
308
- temperature,
309
- top_p: topP,
310
- random_seed: seed,
311
- // response format:
312
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && (responseFormat == null ? void 0 : responseFormat.schema) != null ? {
313
- type: "json_schema",
314
- json_schema: {
315
- schema: responseFormat.schema,
316
- strict: strictJsonSchema,
317
- name: (_d = responseFormat.name) != null ? _d : "response",
318
- description: responseFormat.description
319
- }
320
- } : { type: "json_object" } : void 0,
321
- // mistral-specific provider options:
322
- document_image_limit: options.documentImageLimit,
323
- document_page_limit: options.documentPageLimit,
324
- // messages:
325
- messages: convertToMistralChatMessages(prompt)
326
- };
327
- const {
328
- tools: mistralTools,
329
- toolChoice: mistralToolChoice,
330
- toolWarnings
331
- } = prepareTools({
332
- tools,
333
- toolChoice
334
- });
335
- return {
336
- args: {
337
- ...baseArgs,
338
- tools: mistralTools,
339
- tool_choice: mistralToolChoice,
340
- ...mistralTools != null && options.parallelToolCalls !== void 0 ? { parallel_tool_calls: options.parallelToolCalls } : {}
341
- },
342
- warnings: [...warnings, ...toolWarnings]
343
- };
344
- }
345
- async doGenerate(options) {
346
- const { args: body, warnings } = await this.getArgs(options);
347
- const {
348
- responseHeaders,
349
- value: response,
350
- rawValue: rawResponse
351
- } = await postJsonToApi({
352
- url: `${this.config.baseURL}/chat/completions`,
353
- headers: combineHeaders(this.config.headers(), options.headers),
354
- body,
355
- failedResponseHandler: mistralFailedResponseHandler,
356
- successfulResponseHandler: createJsonResponseHandler(
357
- mistralChatResponseSchema
358
- ),
359
- abortSignal: options.abortSignal,
360
- fetch: this.config.fetch
361
- });
362
- const choice = response.choices[0];
363
- const content = [];
364
- if (choice.message.content != null && Array.isArray(choice.message.content)) {
365
- for (const part of choice.message.content) {
366
- if (part.type === "thinking") {
367
- const reasoningText = extractReasoningContent(part.thinking);
368
- if (reasoningText.length > 0) {
369
- content.push({ type: "reasoning", text: reasoningText });
370
- }
371
- } else if (part.type === "text") {
372
- if (part.text.length > 0) {
373
- content.push({ type: "text", text: part.text });
374
- }
375
- }
376
- }
377
- } else {
378
- const text = extractTextContent(choice.message.content);
379
- if (text != null && text.length > 0) {
380
- content.push({ type: "text", text });
381
- }
382
- }
383
- if (choice.message.tool_calls != null) {
384
- for (const toolCall of choice.message.tool_calls) {
385
- content.push({
386
- type: "tool-call",
387
- toolCallId: toolCall.id,
388
- toolName: toolCall.function.name,
389
- input: toolCall.function.arguments
390
- });
391
- }
392
- }
393
- return {
394
- content,
395
- finishReason: mapMistralFinishReason(choice.finish_reason),
396
- usage: {
397
- inputTokens: response.usage.prompt_tokens,
398
- outputTokens: response.usage.completion_tokens,
399
- totalTokens: response.usage.total_tokens
400
- },
401
- request: { body },
402
- response: {
403
- ...getResponseMetadata(response),
404
- headers: responseHeaders,
405
- body: rawResponse
406
- },
407
- warnings
408
- };
409
- }
410
- async doStream(options) {
411
- const { args, warnings } = await this.getArgs(options);
412
- const body = { ...args, stream: true };
413
- const { responseHeaders, value: response } = await postJsonToApi({
414
- url: `${this.config.baseURL}/chat/completions`,
415
- headers: combineHeaders(this.config.headers(), options.headers),
416
- body,
417
- failedResponseHandler: mistralFailedResponseHandler,
418
- successfulResponseHandler: createEventSourceResponseHandler(
419
- mistralChatChunkSchema
420
- ),
421
- abortSignal: options.abortSignal,
422
- fetch: this.config.fetch
423
- });
424
- let finishReason = "unknown";
425
- const usage = {
426
- inputTokens: void 0,
427
- outputTokens: void 0,
428
- totalTokens: void 0
429
- };
430
- let isFirstChunk = true;
431
- let activeText = false;
432
- let activeReasoningId = null;
433
- const generateId2 = this.generateId;
434
- return {
435
- stream: response.pipeThrough(
436
- new TransformStream({
437
- start(controller) {
438
- controller.enqueue({ type: "stream-start", warnings });
439
- },
440
- transform(chunk, controller) {
441
- if (options.includeRawChunks) {
442
- controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
443
- }
444
- if (!chunk.success) {
445
- controller.enqueue({ type: "error", error: chunk.error });
446
- return;
447
- }
448
- const value = chunk.value;
449
- if (isFirstChunk) {
450
- isFirstChunk = false;
451
- controller.enqueue({
452
- type: "response-metadata",
453
- ...getResponseMetadata(value)
454
- });
455
- }
456
- if (value.usage != null) {
457
- usage.inputTokens = value.usage.prompt_tokens;
458
- usage.outputTokens = value.usage.completion_tokens;
459
- usage.totalTokens = value.usage.total_tokens;
460
- }
461
- const choice = value.choices[0];
462
- const delta = choice.delta;
463
- const textContent = extractTextContent(delta.content);
464
- if (delta.content != null && Array.isArray(delta.content)) {
465
- for (const part of delta.content) {
466
- if (part.type === "thinking") {
467
- const reasoningDelta = extractReasoningContent(part.thinking);
468
- if (reasoningDelta.length > 0) {
469
- if (activeReasoningId == null) {
470
- if (activeText) {
471
- controller.enqueue({ type: "text-end", id: "0" });
472
- activeText = false;
473
- }
474
- activeReasoningId = generateId2();
475
- controller.enqueue({
476
- type: "reasoning-start",
477
- id: activeReasoningId
478
- });
479
- }
480
- controller.enqueue({
481
- type: "reasoning-delta",
482
- id: activeReasoningId,
483
- delta: reasoningDelta
484
- });
485
- }
486
- }
487
- }
488
- }
489
- if (textContent != null && textContent.length > 0) {
490
- if (!activeText) {
491
- if (activeReasoningId != null) {
492
- controller.enqueue({
493
- type: "reasoning-end",
494
- id: activeReasoningId
495
- });
496
- activeReasoningId = null;
497
- }
498
- controller.enqueue({ type: "text-start", id: "0" });
499
- activeText = true;
500
- }
501
- controller.enqueue({
502
- type: "text-delta",
503
- id: "0",
504
- delta: textContent
505
- });
506
- }
507
- if ((delta == null ? void 0 : delta.tool_calls) != null) {
508
- for (const toolCall of delta.tool_calls) {
509
- const toolCallId = toolCall.id;
510
- const toolName = toolCall.function.name;
511
- const input = toolCall.function.arguments;
512
- controller.enqueue({
513
- type: "tool-input-start",
514
- id: toolCallId,
515
- toolName
516
- });
517
- controller.enqueue({
518
- type: "tool-input-delta",
519
- id: toolCallId,
520
- delta: input
521
- });
522
- controller.enqueue({
523
- type: "tool-input-end",
524
- id: toolCallId
525
- });
526
- controller.enqueue({
527
- type: "tool-call",
528
- toolCallId,
529
- toolName,
530
- input
531
- });
532
- }
533
- }
534
- if (choice.finish_reason != null) {
535
- finishReason = mapMistralFinishReason(choice.finish_reason);
536
- }
537
- },
538
- flush(controller) {
539
- if (activeReasoningId != null) {
540
- controller.enqueue({
541
- type: "reasoning-end",
542
- id: activeReasoningId
543
- });
544
- }
545
- if (activeText) {
546
- controller.enqueue({ type: "text-end", id: "0" });
547
- }
548
- controller.enqueue({
549
- type: "finish",
550
- finishReason,
551
- usage
552
- });
553
- }
554
- })
555
- ),
556
- request: { body },
557
- response: { headers: responseHeaders }
558
- };
559
- }
560
- };
561
- function extractReasoningContent(thinking) {
562
- return thinking.filter((chunk) => chunk.type === "text").map((chunk) => chunk.text).join("");
563
- }
564
- function extractTextContent(content) {
565
- if (typeof content === "string") {
566
- return content;
567
- }
568
- if (content == null) {
569
- return void 0;
570
- }
571
- const textContent = [];
572
- for (const chunk of content) {
573
- const { type } = chunk;
574
- switch (type) {
575
- case "text":
576
- textContent.push(chunk.text);
577
- break;
578
- case "thinking":
579
- case "image_url":
580
- case "reference":
581
- break;
582
- default: {
583
- const _exhaustiveCheck = type;
584
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
585
- }
586
- }
587
- }
588
- return textContent.length ? textContent.join("") : void 0;
589
- }
590
- var mistralContentSchema = z.union([
591
- z.string(),
592
- z.array(
593
- z.discriminatedUnion("type", [
594
- z.object({
595
- type: z.literal("text"),
596
- text: z.string()
597
- }),
598
- z.object({
599
- type: z.literal("image_url"),
600
- image_url: z.union([
601
- z.string(),
602
- z.object({
603
- url: z.string(),
604
- detail: z.string().nullable()
605
- })
606
- ])
607
- }),
608
- z.object({
609
- type: z.literal("reference"),
610
- reference_ids: z.array(z.number())
611
- }),
612
- z.object({
613
- type: z.literal("thinking"),
614
- thinking: z.array(
615
- z.object({
616
- type: z.literal("text"),
617
- text: z.string()
618
- })
619
- )
620
- })
621
- ])
622
- )
623
- ]).nullish();
624
- var mistralUsageSchema = z.object({
625
- prompt_tokens: z.number(),
626
- completion_tokens: z.number(),
627
- total_tokens: z.number()
628
- });
629
- var mistralChatResponseSchema = z.object({
630
- id: z.string().nullish(),
631
- created: z.number().nullish(),
632
- model: z.string().nullish(),
633
- choices: z.array(
634
- z.object({
635
- message: z.object({
636
- role: z.literal("assistant"),
637
- content: mistralContentSchema,
638
- tool_calls: z.array(
639
- z.object({
640
- id: z.string(),
641
- function: z.object({ name: z.string(), arguments: z.string() })
642
- })
643
- ).nullish()
644
- }),
645
- index: z.number(),
646
- finish_reason: z.string().nullish()
647
- })
648
- ),
649
- object: z.literal("chat.completion"),
650
- usage: mistralUsageSchema
651
- });
652
- var mistralChatChunkSchema = z.object({
653
- id: z.string().nullish(),
654
- created: z.number().nullish(),
655
- model: z.string().nullish(),
656
- choices: z.array(
657
- z.object({
658
- delta: z.object({
659
- role: z.enum(["assistant"]).optional(),
660
- content: mistralContentSchema,
661
- tool_calls: z.array(
662
- z.object({
663
- id: z.string(),
664
- function: z.object({ name: z.string(), arguments: z.string() })
665
- })
666
- ).nullish()
667
- }),
668
- finish_reason: z.string().nullish(),
669
- index: z.number()
670
- })
671
- ),
672
- usage: mistralUsageSchema.nullish()
673
- });
674
// Mistral embeddings model (AI SDK EmbeddingModel "v2" implementation).
var MistralEmbeddingModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    // Mistral caps a single embeddings request at 32 inputs.
    this.maxEmbeddingsPerCall = 32;
    this.supportsParallelCalls = false;
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  /**
   * Embeds `values` with a single POST to the Mistral /embeddings endpoint.
   * Throws TooManyEmbeddingValuesForCallError when more values are passed
   * than one call allows.
   */
  async doEmbed({ values, abortSignal, headers }) {
    if (values.length > this.maxEmbeddingsPerCall) {
      throw new TooManyEmbeddingValuesForCallError({
        provider: this.provider,
        modelId: this.modelId,
        maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
        values
      });
    }
    const { responseHeaders, value: response, rawValue } = await postJsonToApi({
      url: `${this.config.baseURL}/embeddings`,
      headers: combineHeaders(this.config.headers(), headers),
      body: {
        model: this.modelId,
        input: values,
        encoding_format: "float"
      },
      failedResponseHandler: mistralFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(MistralTextEmbeddingResponseSchema),
      abortSignal,
      fetch: this.config.fetch
    });
    const usage = response.usage ? { tokens: response.usage.prompt_tokens } : void 0;
    return {
      embeddings: response.data.map((entry) => entry.embedding),
      usage,
      response: { headers: responseHeaders, body: rawValue }
    };
  }
};
724
// Minimal schema for the Mistral /embeddings response body.
var MistralTextEmbeddingResponseSchema = z.object({
  data: z.array(z.object({ embedding: z.array(z.number()) })),
  usage: z.object({ prompt_tokens: z.number() }).nullish()
});
728
// Bundled @ai-sdk/mistral version, used only in the User-Agent suffix.
var VERSION = "2.0.24";
/**
 * Creates a Mistral provider. The returned value is callable as
 * `provider(modelId)` (chat model) and also exposes chat / embedding
 * factories as properties. Image models are not supported.
 */
function createMistral(options = {}) {
  var _a;
  const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://api.mistral.ai/v1";
  // API key comes from options or the MISTRAL_API_KEY env var.
  const getHeaders = () => withUserAgentSuffix(
    {
      Authorization: `Bearer ${loadApiKey({
        apiKey: options.apiKey,
        environmentVariableName: "MISTRAL_API_KEY",
        description: "Mistral"
      })}`,
      ...options.headers
    },
    `ai-sdk/mistral/${VERSION}`
  );
  const buildChatModel = (modelId) => new MistralChatLanguageModel(modelId, {
    provider: "mistral.chat",
    baseURL,
    headers: getHeaders,
    fetch: options.fetch,
    generateId: options.generateId
  });
  const buildEmbeddingModel = (modelId) => new MistralEmbeddingModel(modelId, {
    provider: "mistral.embedding",
    baseURL,
    headers: getHeaders,
    fetch: options.fetch
  });
  const provider = function(modelId) {
    // Guard against `new provider(...)` — it is a plain factory function.
    if (new.target) {
      throw new Error(
        "The Mistral model function cannot be called with the new keyword."
      );
    }
    return buildChatModel(modelId);
  };
  provider.languageModel = buildChatModel;
  provider.chat = buildChatModel;
  provider.embedding = buildEmbeddingModel;
  provider.textEmbedding = buildEmbeddingModel;
  provider.textEmbeddingModel = buildEmbeddingModel;
  provider.imageModel = (modelId) => {
    throw new NoSuchModelError({ modelId, modelType: "imageModel" });
  };
  return provider;
}
// Module-level side effect preserved from the generated bundle.
createMistral();
775
/**
 * Converts an AI SDK prompt into xAI chat-completion messages.
 * Returns the converted messages plus a warnings array (currently always
 * empty, but part of the contract).
 * Throws UnsupportedFunctionalityError for non-image file parts and Error
 * for unknown roles.
 */
function convertToXaiChatMessages(prompt) {
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
    switch (role) {
      case "system":
        messages.push({ role: "system", content });
        break;
      case "user": {
        // A lone text part collapses to a plain string for compactness.
        if (content.length === 1 && content[0].type === "text") {
          messages.push({ role: "user", content: content[0].text });
          break;
        }
        const parts = content.map((part) => {
          switch (part.type) {
            case "text":
              return { type: "text", text: part.text };
            case "file": {
              if (!part.mediaType.startsWith("image/")) {
                throw new UnsupportedFunctionalityError({
                  functionality: `file part media type ${part.mediaType}`
                });
              }
              // Wildcard image types default to JPEG.
              const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
              const url = part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`;
              return { type: "image_url", image_url: { url } };
            }
          }
        });
        messages.push({ role: "user", content: parts });
        break;
      }
      case "assistant": {
        // Text parts are concatenated; tool calls are collected separately.
        let text = "";
        const toolCalls = [];
        for (const part of content) {
          if (part.type === "text") {
            text += part.text;
          } else if (part.type === "tool-call") {
            toolCalls.push({
              id: part.toolCallId,
              type: "function",
              function: {
                name: part.toolName,
                arguments: JSON.stringify(part.input)
              }
            });
          }
        }
        messages.push({
          role: "assistant",
          content: text,
          tool_calls: toolCalls.length > 0 ? toolCalls : void 0
        });
        break;
      }
      case "tool": {
        for (const toolResponse of content) {
          const output = toolResponse.output;
          let contentValue;
          switch (output.type) {
            case "text":
            case "error-text":
              contentValue = output.value;
              break;
            case "content":
            case "json":
            case "error-json":
              // Structured outputs are serialized to JSON text.
              contentValue = JSON.stringify(output.value);
              break;
          }
          messages.push({
            role: "tool",
            tool_call_id: toolResponse.toolCallId,
            content: contentValue
          });
        }
        break;
      }
      default: {
        const _exhaustiveCheck = role;
        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
      }
    }
  }
  return { messages, warnings };
}
876
// Maps raw xAI response fields onto AI SDK response metadata.
// `created` is epoch seconds and is scaled to milliseconds for Date.
function getResponseMetadata2({ id, model, created }) {
  return {
    id: id ?? void 0,
    modelId: model ?? void 0,
    timestamp: created != null ? new Date(created * 1e3) : void 0
  };
}
887
// Normalizes xAI finish_reason strings to AI SDK finish reasons.
// Unrecognized (or missing) values map to "unknown".
function mapXaiFinishReason(finishReason) {
  const mapping = new Map([
    ["stop", "stop"],
    ["length", "length"],
    ["tool_calls", "tool-calls"],
    ["function_call", "tool-calls"],
    ["content_filter", "content-filter"]
  ]);
  return mapping.get(finishReason) ?? "unknown";
}
902
// Search-source schemas for xAI Live Search, discriminated on `type`.
var webSourceSchema = z.object({
  type: z.literal("web"),
  country: z.string().length(2).optional(),
  excludedWebsites: z.array(z.string()).max(5).optional(),
  allowedWebsites: z.array(z.string()).max(5).optional(),
  safeSearch: z.boolean().optional()
});
var xSourceSchema = z.object({
  type: z.literal("x"),
  excludedXHandles: z.array(z.string()).optional(),
  includedXHandles: z.array(z.string()).optional(),
  postFavoriteCount: z.number().int().optional(),
  postViewCount: z.number().int().optional(),
  /**
   * @deprecated use `includedXHandles` instead
   */
  xHandles: z.array(z.string()).optional()
});
var newsSourceSchema = z.object({
  type: z.literal("news"),
  country: z.string().length(2).optional(),
  excludedWebsites: z.array(z.string()).max(5).optional(),
  safeSearch: z.boolean().optional()
});
var rssSourceSchema = z.object({
  type: z.literal("rss"),
  // currently only supports one RSS link
  links: z.array(z.string().url()).max(1)
});
// Union of every accepted search source shape.
var searchSourceSchema = z.discriminatedUnion("type", [
  webSourceSchema,
  xSourceSchema,
  newsSourceSchema,
  rssSourceSchema
]);
937
// Provider-specific options accepted under `providerOptions.xai`.
var xaiProviderOptions = z.object({
  reasoningEffort: z.enum(["low", "high"]).optional(),
  /**
   * Whether to enable parallel function calling during tool use.
   * When true, the model can call multiple functions in parallel.
   * When false, the model will call functions sequentially.
   * Defaults to true.
   */
  parallel_function_calling: z.boolean().optional(),
  // Live Search configuration; omitted entirely when search is unused.
  searchParameters: z.object({
    /**
     * search mode preference
     * - "off": disables search completely
     * - "auto": model decides whether to search (default)
     * - "on": always enables search
     */
    mode: z.enum(["off", "auto", "on"]),
    /**
     * whether to return citations in the response
     * defaults to true
     */
    returnCitations: z.boolean().optional(),
    /**
     * start date for search data (ISO8601 format: YYYY-MM-DD)
     */
    fromDate: z.string().optional(),
    /**
     * end date for search data (ISO8601 format: YYYY-MM-DD)
     */
    toDate: z.string().optional(),
    /**
     * maximum number of search results to consider
     * defaults to 20
     */
    maxSearchResults: z.number().min(1).max(50).optional(),
    /**
     * data sources to search from
     * defaults to ["web", "x"] if not specified
     */
    sources: z.array(searchSourceSchema).optional()
  }).optional()
});
979
// Shape of an xAI error payload plus the handler that extracts its message.
var xaiErrorDataSchema = z.object({
  error: z.object({
    message: z.string(),
    type: z.string().nullish(),
    param: z.any().nullish(),
    code: z.union([z.string(), z.number()]).nullish()
  })
});
var xaiFailedResponseHandler = createJsonErrorResponseHandler({
  errorSchema: xaiErrorDataSchema,
  errorToMessage: (data) => data.error.message
});
991
/**
 * Translates AI SDK tool definitions and tool choice into xAI's
 * chat-completions format. Provider-defined tools are not supported and are
 * reported through `toolWarnings` instead of being sent.
 * Throws UnsupportedFunctionalityError for unknown tool-choice types.
 */
function prepareTools2({ tools, toolChoice }) {
  const toolWarnings = [];
  // Treat an empty tool list the same as no tools at all.
  const activeTools = tools?.length ? tools : void 0;
  if (activeTools == null) {
    return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
  const xaiTools = [];
  for (const tool of activeTools) {
    if (tool.type === "provider-defined") {
      toolWarnings.push({ type: "unsupported-tool", tool });
    } else {
      xaiTools.push({
        type: "function",
        function: {
          name: tool.name,
          description: tool.description,
          parameters: tool.inputSchema
        }
      });
    }
  }
  if (toolChoice == null) {
    return { tools: xaiTools, toolChoice: void 0, toolWarnings };
  }
  switch (toolChoice.type) {
    case "auto":
    case "none":
      return { tools: xaiTools, toolChoice: toolChoice.type, toolWarnings };
    case "required":
      return { tools: xaiTools, toolChoice: "required", toolWarnings };
    case "tool":
      // Force a specific function by name.
      return {
        tools: xaiTools,
        toolChoice: {
          type: "function",
          function: { name: toolChoice.toolName }
        },
        toolWarnings
      };
    default:
      throw new UnsupportedFunctionalityError({
        functionality: `tool choice type: ${toolChoice.type}`
      });
  }
}
1042
// xAI chat language model (AI SDK LanguageModel "v2" implementation).
var XaiChatLanguageModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v2";
    // Image URLs may be passed straight through to the API.
    this.supportedUrls = {
      "image/*": [/^https?:\/\/.*$/]
    };
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // Builds the request body shared by doGenerate and doStream, collecting
  // warnings for settings xAI does not support.
  async getArgs({
    prompt,
    maxOutputTokens,
    temperature,
    topP,
    topK,
    frequencyPenalty,
    presencePenalty,
    stopSequences,
    seed,
    responseFormat,
    providerOptions,
    tools,
    toolChoice
  }) {
    const warnings = [];
    const options = await parseProviderOptions({
      provider: "xai",
      providerOptions,
      schema: xaiProviderOptions
    }) ?? {};
    // xAI's chat endpoint has no equivalents for these settings.
    if (topK != null) {
      warnings.push({ type: "unsupported-setting", setting: "topK" });
    }
    if (frequencyPenalty != null) {
      warnings.push({ type: "unsupported-setting", setting: "frequencyPenalty" });
    }
    if (presencePenalty != null) {
      warnings.push({ type: "unsupported-setting", setting: "presencePenalty" });
    }
    if (stopSequences != null) {
      warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
    }
    const { messages, warnings: messageWarnings } = convertToXaiChatMessages(prompt);
    warnings.push(...messageWarnings);
    const {
      tools: xaiTools,
      toolChoice: xaiToolChoice,
      toolWarnings
    } = prepareTools2({ tools, toolChoice });
    warnings.push(...toolWarnings);
    // JSON response format: use a strict json_schema when a schema is given,
    // otherwise plain json_object mode.
    let response_format;
    if (responseFormat?.type === "json") {
      response_format = responseFormat.schema != null ? {
        type: "json_schema",
        json_schema: {
          name: responseFormat.name ?? "response",
          schema: responseFormat.schema,
          strict: true
        }
      } : { type: "json_object" };
    }
    // Live Search parameters, translated from camelCase to the API's
    // snake_case, with per-source-type fields spread in conditionally.
    let search_parameters;
    if (options.searchParameters) {
      search_parameters = {
        mode: options.searchParameters.mode,
        return_citations: options.searchParameters.returnCitations,
        from_date: options.searchParameters.fromDate,
        to_date: options.searchParameters.toDate,
        max_search_results: options.searchParameters.maxSearchResults,
        sources: options.searchParameters.sources?.map((source) => ({
          type: source.type,
          ...source.type === "web" && {
            country: source.country,
            excluded_websites: source.excludedWebsites,
            allowed_websites: source.allowedWebsites,
            safe_search: source.safeSearch
          },
          ...source.type === "x" && {
            excluded_x_handles: source.excludedXHandles,
            // deprecated xHandles is honored as a fallback
            included_x_handles: source.includedXHandles ?? source.xHandles,
            post_favorite_count: source.postFavoriteCount,
            post_view_count: source.postViewCount
          },
          ...source.type === "news" && {
            country: source.country,
            excluded_websites: source.excludedWebsites,
            safe_search: source.safeSearch
          },
          ...source.type === "rss" && {
            links: source.links
          }
        }))
      };
    }
    const baseArgs = {
      // model id
      model: this.modelId,
      // standard generation settings
      max_tokens: maxOutputTokens,
      temperature,
      top_p: topP,
      seed,
      reasoning_effort: options.reasoningEffort,
      parallel_function_calling: options.parallel_function_calling,
      response_format,
      search_parameters,
      // messages and tools in xAI format
      messages,
      tools: xaiTools,
      tool_choice: xaiToolChoice
    };
    return { args: baseArgs, warnings };
  }
  // Single (non-streaming) completion call.
  async doGenerate(options) {
    const { args: body, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
      rawValue: rawResponse
    } = await postJsonToApi({
      url: `${this.config.baseURL ?? "https://api.x.ai/v1"}/chat/completions`,
      headers: combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: xaiFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(xaiChatResponseSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    const choice = response.choices[0];
    const content = [];
    if (choice.message.content != null && choice.message.content.length > 0) {
      let text = choice.message.content;
      // Echo guard: drop text that exactly repeats the trailing assistant
      // message from the request.
      const lastMessage = body.messages[body.messages.length - 1];
      if (lastMessage?.role === "assistant" && text === lastMessage.content) {
        text = "";
      }
      if (text.length > 0) {
        content.push({ type: "text", text });
      }
    }
    if (choice.message.reasoning_content != null && choice.message.reasoning_content.length > 0) {
      content.push({
        type: "reasoning",
        text: choice.message.reasoning_content
      });
    }
    if (choice.message.tool_calls != null) {
      for (const toolCall of choice.message.tool_calls) {
        content.push({
          type: "tool-call",
          toolCallId: toolCall.id,
          toolName: toolCall.function.name,
          input: toolCall.function.arguments
        });
      }
    }
    // Live Search citations become source parts with generated ids.
    if (response.citations != null) {
      for (const url of response.citations) {
        content.push({
          type: "source",
          sourceType: "url",
          id: this.config.generateId(),
          url
        });
      }
    }
    return {
      content,
      finishReason: mapXaiFinishReason(choice.finish_reason),
      usage: {
        inputTokens: response.usage.prompt_tokens,
        outputTokens: response.usage.completion_tokens,
        totalTokens: response.usage.total_tokens,
        reasoningTokens: response.usage.completion_tokens_details?.reasoning_tokens ?? void 0
      },
      request: { body },
      response: {
        ...getResponseMetadata2(response),
        headers: responseHeaders,
        body: rawResponse
      },
      warnings
    };
  }
  // Streaming completion call; adapts the SSE chunk stream into AI SDK
  // stream parts via a TransformStream.
  async doStream(options) {
    const { args, warnings } = await this.getArgs(options);
    const body = {
      ...args,
      stream: true,
      stream_options: {
        include_usage: true
      }
    };
    const { responseHeaders, value: response } = await postJsonToApi({
      url: `${this.config.baseURL ?? "https://api.x.ai/v1"}/chat/completions`,
      headers: combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: xaiFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(xaiChatChunkSchema),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
    let finishReason = "unknown";
    const usage = {
      inputTokens: void 0,
      outputTokens: void 0,
      totalTokens: void 0
    };
    let isFirstChunk = true;
    // Open text/reasoning blocks keyed by synthetic block id, so matching
    // *-end events can be emitted on flush.
    const contentBlocks = {};
    // Last reasoning delta per block, used to drop exact repeats.
    const lastReasoningDeltas = {};
    const self = this;
    return {
      stream: response.pipeThrough(
        new TransformStream({
          start(controller) {
            controller.enqueue({ type: "stream-start", warnings });
          },
          transform(chunk, controller) {
            if (options.includeRawChunks) {
              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
            }
            if (!chunk.success) {
              controller.enqueue({ type: "error", error: chunk.error });
              return;
            }
            const value = chunk.value;
            if (isFirstChunk) {
              controller.enqueue({
                type: "response-metadata",
                ...getResponseMetadata2(value)
              });
              isFirstChunk = false;
            }
            if (value.citations != null) {
              for (const url of value.citations) {
                controller.enqueue({
                  type: "source",
                  sourceType: "url",
                  id: self.config.generateId(),
                  url
                });
              }
            }
            // Usage arrives on the final chunk (stream_options.include_usage).
            if (value.usage != null) {
              usage.inputTokens = value.usage.prompt_tokens;
              usage.outputTokens = value.usage.completion_tokens;
              usage.totalTokens = value.usage.total_tokens;
              usage.reasoningTokens = value.usage.completion_tokens_details?.reasoning_tokens ?? void 0;
            }
            const choice = value.choices[0];
            if (choice?.finish_reason != null) {
              finishReason = mapXaiFinishReason(choice.finish_reason);
            }
            if (choice?.delta == null) {
              return;
            }
            const delta = choice.delta;
            const choiceIndex = choice.index;
            if (delta.content != null && delta.content.length > 0) {
              const textContent = delta.content;
              // Echo guard: drop a delta identical to the trailing assistant
              // message from the request.
              const lastMessage = body.messages[body.messages.length - 1];
              if (lastMessage?.role === "assistant" && textContent === lastMessage.content) {
                return;
              }
              const blockId = `text-${value.id || choiceIndex}`;
              if (contentBlocks[blockId] == null) {
                contentBlocks[blockId] = { type: "text" };
                controller.enqueue({
                  type: "text-start",
                  id: blockId
                });
              }
              controller.enqueue({
                type: "text-delta",
                id: blockId,
                delta: textContent
              });
            }
            if (delta.reasoning_content != null && delta.reasoning_content.length > 0) {
              const blockId = `reasoning-${value.id || choiceIndex}`;
              // NOTE(review): a repeated reasoning delta returns from the
              // whole transform step (also skipping any tool calls in the
              // same chunk) — behavior preserved from upstream.
              if (lastReasoningDeltas[blockId] === delta.reasoning_content) {
                return;
              }
              lastReasoningDeltas[blockId] = delta.reasoning_content;
              if (contentBlocks[blockId] == null) {
                contentBlocks[blockId] = { type: "reasoning" };
                controller.enqueue({
                  type: "reasoning-start",
                  id: blockId
                });
              }
              controller.enqueue({
                type: "reasoning-delta",
                id: blockId,
                delta: delta.reasoning_content
              });
            }
            if (delta.tool_calls != null) {
              for (const toolCall of delta.tool_calls) {
                const toolCallId = toolCall.id;
                // Each streamed tool call is treated as complete: start,
                // delta, end, and the final tool-call are emitted together.
                controller.enqueue({
                  type: "tool-input-start",
                  id: toolCallId,
                  toolName: toolCall.function.name
                });
                controller.enqueue({
                  type: "tool-input-delta",
                  id: toolCallId,
                  delta: toolCall.function.arguments
                });
                controller.enqueue({
                  type: "tool-input-end",
                  id: toolCallId
                });
                controller.enqueue({
                  type: "tool-call",
                  toolCallId,
                  toolName: toolCall.function.name,
                  input: toolCall.function.arguments
                });
              }
            }
          },
          flush(controller) {
            // Close every block that was opened, then signal completion.
            for (const [blockId, block] of Object.entries(contentBlocks)) {
              controller.enqueue({
                type: block.type === "text" ? "text-end" : "reasoning-end",
                id: blockId
              });
            }
            controller.enqueue({ type: "finish", finishReason, usage });
          }
        })
      ),
      request: { body },
      response: { headers: responseHeaders }
    };
  }
};
1408
// Token accounting returned by xAI; reasoning-token detail is optional.
var xaiUsageSchema = z.object({
  prompt_tokens: z.number(),
  completion_tokens: z.number(),
  total_tokens: z.number(),
  completion_tokens_details: z.object({
    reasoning_tokens: z.number().nullish()
  }).nullish()
});
1416
// Zod schema for a complete (non-streaming) xAI chat completion response.
var xaiChatResponseSchema = z.object({
  id: z.string().nullish(),
  created: z.number().nullish(),
  model: z.string().nullish(),
  choices: z.array(
    z.object({
      message: z.object({
        role: z.literal("assistant"),
        content: z.string().nullish(),
        reasoning_content: z.string().nullish(),
        tool_calls: z
          .array(
            z.object({
              id: z.string(),
              type: z.literal("function"),
              function: z.object({
                name: z.string(),
                arguments: z.string()
              })
            })
          )
          .nullish()
      }),
      index: z.number(),
      finish_reason: z.string().nullish()
    })
  ),
  object: z.literal("chat.completion"),
  usage: xaiUsageSchema,
  // Live Search citation URLs, when search was enabled.
  citations: z.array(z.string().url()).nullish()
});
1445
// Zod schema for one streamed xAI chat-completion chunk (SSE delta).
var xaiChatChunkSchema = z.object({
  id: z.string().nullish(),
  created: z.number().nullish(),
  model: z.string().nullish(),
  choices: z.array(
    z.object({
      delta: z.object({
        role: z.enum(["assistant"]).optional(),
        content: z.string().nullish(),
        reasoning_content: z.string().nullish(),
        tool_calls: z
          .array(
            z.object({
              id: z.string(),
              type: z.literal("function"),
              function: z.object({
                name: z.string(),
                arguments: z.string()
              })
            })
          )
          .nullish()
      }),
      finish_reason: z.string().nullish(),
      index: z.number()
    })
  ),
  // Usage only appears on the final chunk.
  usage: xaiUsageSchema.nullish(),
  citations: z.array(z.string().url()).nullish()
});
1473
// Bundled @ai-sdk/xai version, used only in the User-Agent suffix.
var VERSION2 = "2.0.33";
// Shared error-parsing configuration for xAI endpoints (chat + image).
var xaiErrorStructure = {
  errorSchema: xaiErrorDataSchema,
  errorToMessage: (data) => data.error.message
};
1478
/**
 * Creates an xAI provider. Callable as `provider(modelId)` for a chat model;
 * image models are served through the OpenAI-compatible image endpoint and
 * text embeddings are not supported.
 */
function createXai(options = {}) {
  const baseURL = withoutTrailingSlash(options.baseURL ?? "https://api.x.ai/v1");
  // API key comes from options or the XAI_API_KEY env var.
  const getHeaders = () => withUserAgentSuffix(
    {
      Authorization: `Bearer ${loadApiKey({
        apiKey: options.apiKey,
        environmentVariableName: "XAI_API_KEY",
        description: "xAI API key"
      })}`,
      ...options.headers
    },
    `ai-sdk/xai/${VERSION2}`
  );
  const buildLanguageModel = (modelId) => {
    return new XaiChatLanguageModel(modelId, {
      provider: "xai.chat",
      baseURL,
      headers: getHeaders,
      generateId,
      fetch: options.fetch
    });
  };
  const buildImageModel = (modelId) => {
    return new OpenAICompatibleImageModel(modelId, {
      provider: "xai.image",
      url: ({ path }) => `${baseURL}${path}`,
      headers: getHeaders,
      fetch: options.fetch,
      errorStructure: xaiErrorStructure
    });
  };
  const provider = (modelId) => buildLanguageModel(modelId);
  provider.languageModel = buildLanguageModel;
  provider.chat = buildLanguageModel;
  provider.textEmbeddingModel = (modelId) => {
    throw new NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
  };
  provider.imageModel = buildImageModel;
  provider.image = buildImageModel;
  return provider;
}
// Module-level side effect preserved from the generated bundle.
createXai();
1523
-
1524
- // src/llm/model/gateway-resolver.ts
1525
/**
 * Splits a model-router ID into { providerId, modelId }.
 *
 * Supported shapes:
 *   provider/model[...]              (no gateway prefix)
 *   prefix/provider/model[...]       (with gatewayPrefix)
 *   azure-openai/deployment-name     (deployment name may itself contain "/")
 *
 * Throws when the ID does not match the expected shape.
 */
function parseModelRouterId(routerId, gatewayPrefix) {
  if (gatewayPrefix && !routerId.startsWith(`${gatewayPrefix}/`)) {
    throw new Error(`Expected ${gatewayPrefix}/ in model router ID ${routerId}`);
  }
  const idParts = routerId.split("/");
  if (gatewayPrefix === "azure-openai") {
    if (idParts.length < 2) {
      throw new Error(`Expected format azure-openai/deployment-name, but got ${routerId}`);
    }
    // Everything after the prefix is the deployment name.
    return {
      providerId: "azure-openai",
      modelId: idParts.slice(1).join("/")
    };
  }
  if (gatewayPrefix && idParts.length < 3) {
    throw new Error(
      `Expected atleast 3 id parts ${gatewayPrefix}/provider/model, but only saw ${idParts.length} in ${routerId}`
    );
  }
  const offset = gatewayPrefix ? 1 : 0;
  const providerId = idParts.at(offset);
  const modelId = idParts.slice(offset + 1).join(`/`);
  if (!routerId.includes(`/`) || !providerId || !modelId) {
    throw new Error(
      `Attempted to parse provider/model from ${routerId} but this ID doesn't appear to contain a provider`
    );
  }
  return {
    providerId,
    modelId
  };
}
1557
-
1558
- // src/llm/model/gateways/constants.ts
1559
// Providers whose dedicated SDK factories ship inside this bundle.
var PROVIDERS_WITH_INSTALLED_PACKAGES = ["anthropic", "google", "mistral", "openai", "openrouter", "xai"];
// Providers deliberately skipped when building gateway configs.
var EXCLUDED_PROVIDERS = ["github-copilot"];
1561
-
1562
- // src/llm/model/gateways/models-dev.ts
1563
// Per-provider overrides for OpenAI-compatible endpoints, used when
// models.dev metadata is missing or incomplete for a provider.
var OPENAI_COMPATIBLE_OVERRIDES = {
  cerebras: {
    url: "https://api.cerebras.ai/v1"
  },
  mistral: {
    url: "https://api.mistral.ai/v1"
  },
  groq: {
    url: "https://api.groq.com/openai/v1"
  },
  togetherai: {
    url: "https://api.together.xyz/v1"
  },
  deepinfra: {
    url: "https://api.deepinfra.com/v1/openai"
  },
  perplexity: {
    url: "https://api.perplexity.ai"
  },
  vercel: {
    url: "https://ai-gateway.vercel.sh/v1",
    apiKeyEnvVar: "AI_GATEWAY_API_KEY"
  }
};
1587
// Gateway that sources provider/model metadata from https://models.dev.
var ModelsDevGateway = class extends MastraModelGateway {
  id = "models.dev";
  name = "models.dev";
  providerConfigs = {};
  constructor(providerConfigs) {
    super();
    if (providerConfigs) this.providerConfigs = providerConfigs;
  }
  /**
   * Fetches the models.dev registry and rebuilds `providerConfigs`.
   * A provider is kept when it is OpenAI-compatible, has an SDK factory in
   * this bundle, or exposes both an API URL and an API-key env var.
   */
  async fetchProviders() {
    const response = await fetch("https://models.dev/api.json");
    if (!response.ok) {
      throw new Error(`Failed to fetch from models.dev: ${response.statusText}`);
    }
    const data = await response.json();
    const providerConfigs = {};
    for (const [providerId, providerInfo] of Object.entries(data)) {
      if (EXCLUDED_PROVIDERS.includes(providerId)) continue;
      if (!providerInfo || typeof providerInfo !== "object" || !providerInfo.models) continue;
      const normalizedId = providerId;
      const isOpenAICompatible =
        providerInfo.npm === "@ai-sdk/openai-compatible" ||
        providerInfo.npm === "@ai-sdk/gateway" || // Vercel AI Gateway is OpenAI-compatible
        normalizedId in OPENAI_COMPATIBLE_OVERRIDES;
      const hasInstalledPackage = PROVIDERS_WITH_INSTALLED_PACKAGES.includes(providerId);
      const hasApiAndEnv = providerInfo.api && providerInfo.env && providerInfo.env.length > 0;
      if (!isOpenAICompatible && !hasInstalledPackage && !hasApiAndEnv) continue;
      const modelIds = Object.keys(providerInfo.models).sort();
      const url = providerInfo.api || OPENAI_COMPATIBLE_OVERRIDES[normalizedId]?.url;
      // Without an installed SDK we must know a URL to call.
      if (!hasInstalledPackage && !url) {
        continue;
      }
      const apiKeyEnvVar = providerInfo.env?.[0] || `${normalizedId.toUpperCase().replace(/-/g, "_")}_API_KEY`;
      const apiKeyHeader = !hasInstalledPackage ? OPENAI_COMPATIBLE_OVERRIDES[normalizedId]?.apiKeyHeader || "Authorization" : void 0;
      providerConfigs[normalizedId] = {
        url,
        apiKeyEnvVar,
        apiKeyHeader,
        name: providerInfo.name || providerId.charAt(0).toUpperCase() + providerId.slice(1),
        models: modelIds,
        docUrl: providerInfo.doc, // documentation URL, if available
        gateway: `models.dev`
      };
    }
    this.providerConfigs = providerConfigs;
    return providerConfigs;
  }
  // Resolves the base URL for a router ID, honoring a per-provider
  // <PROVIDER>_BASE_URL environment override; undefined if unknown.
  buildUrl(routerId, envVars) {
    const { providerId } = parseModelRouterId(routerId);
    const config = this.providerConfigs[providerId];
    if (!config?.url) {
      return;
    }
    const baseUrlEnvVar = `${providerId.toUpperCase().replace(/-/g, "_")}_BASE_URL`;
    const customBaseUrl = envVars?.[baseUrlEnvVar] || process.env[baseUrlEnvVar];
    return customBaseUrl || config.url;
  }
  // Looks up the API key for `provider/model` from the configured env var.
  // Returns a resolved promise; throws when the key cannot be found.
  getApiKey(modelId) {
    const [provider, model] = modelId.split("/");
    if (!provider || !model) {
      throw new Error(`Could not identify provider from model id ${modelId}`);
    }
    const config = this.providerConfigs[provider];
    if (!config) {
      throw new Error(`Could not find config for provider ${provider} with model id ${modelId}`);
    }
    const apiKey = typeof config.apiKeyEnvVar === `string` ? process.env[config.apiKeyEnvVar] : void 0;
    if (!apiKey) {
      throw new Error(`Could not find API key process.env.${config.apiKeyEnvVar} for model id ${modelId}`);
    }
    return Promise.resolve(apiKey);
  }
  // Instantiates a language model, preferring bundled first-party SDK
  // factories and falling back to the generic OpenAI-compatible client.
  async resolveLanguageModel({ modelId, providerId, apiKey, headers }) {
    const baseURL = this.buildUrl(`${providerId}/${modelId}`);
    switch (providerId) {
      case "openai":
        return createOpenAI({ apiKey }).responses(modelId);
      case "gemini":
      case "google":
        return createGoogleGenerativeAI({ apiKey }).chat(modelId);
      case "anthropic":
        return createAnthropic({ apiKey })(modelId);
      case "mistral":
        return createMistral({ apiKey })(modelId);
      case "openrouter":
        return createOpenRouter({ apiKey, headers })(modelId);
      case "xai":
        return createXai({ apiKey })(modelId);
      default:
        if (!baseURL) throw new Error(`No API URL found for ${providerId}/${modelId}`);
        return createOpenAICompatible({ name: providerId, apiKey, baseURL, supportsStructuredOutputs: true }).chatModel(modelId);
    }
  }
};
1691
-
1692
- export { ModelsDevGateway, parseModelRouterId };
1693
- //# sourceMappingURL=chunk-T2UNO766.js.map