@core-ai/mistral 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +26 -0
- package/dist/index.d.ts +15 -0
- package/dist/index.js +416 -0
- package/package.json +58 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 Omnifact (https://omnifact.ai)
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
# @core-ai/mistral
|
|
2
|
+
|
|
3
|
+
Mistral provider package for `@core-ai/core-ai`.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install @core-ai/core-ai @core-ai/mistral zod
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Usage
|
|
12
|
+
|
|
13
|
+
```ts
|
|
14
|
+
import { generate } from '@core-ai/core-ai';
|
|
15
|
+
import { createMistral } from '@core-ai/mistral';
|
|
16
|
+
|
|
17
|
+
const mistral = createMistral({ apiKey: process.env.MISTRAL_API_KEY });
|
|
18
|
+
const model = mistral.chatModel('mistral-large-latest');
|
|
19
|
+
|
|
20
|
+
const result = await generate({
|
|
21
|
+
model,
|
|
22
|
+
messages: [{ role: 'user', content: 'Hello!' }],
|
|
23
|
+
});
|
|
24
|
+
|
|
25
|
+
console.log(result.content);
|
|
26
|
+
```
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import { Mistral } from '@mistralai/mistralai';
|
|
2
|
+
import { ChatModel, EmbeddingModel } from '@core-ai/core-ai';
|
|
3
|
+
|
|
4
|
+
type MistralProviderOptions = {
|
|
5
|
+
apiKey?: string;
|
|
6
|
+
baseURL?: string;
|
|
7
|
+
client?: Mistral;
|
|
8
|
+
};
|
|
9
|
+
type MistralProvider = {
|
|
10
|
+
chatModel(modelId: string): ChatModel;
|
|
11
|
+
embeddingModel(modelId: string): EmbeddingModel;
|
|
12
|
+
};
|
|
13
|
+
declare function createMistral(options?: MistralProviderOptions): MistralProvider;
|
|
14
|
+
|
|
15
|
+
export { type MistralProvider, type MistralProviderOptions, createMistral };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,416 @@
|
|
|
1
|
+
// src/provider.ts
|
|
2
|
+
import { Mistral } from "@mistralai/mistralai";
|
|
3
|
+
|
|
4
|
+
// src/chat-model.ts
|
|
5
|
+
import { createStreamResult } from "@core-ai/core-ai";
|
|
6
|
+
|
|
7
|
+
// src/chat-adapter.ts
|
|
8
|
+
import { MistralError } from "@mistralai/mistralai/models/errors";
|
|
9
|
+
import { zodToJsonSchema } from "zod-to-json-schema";
|
|
10
|
+
import { ProviderError } from "@core-ai/core-ai";
|
|
11
|
+
// Translate every core-ai message into the Mistral wire format.
function convertMessages(messages) {
  const converted = [];
  for (const message of messages) {
    converted.push(convertMessage(message));
  }
  return converted;
}
|
|
14
|
+
// Convert a single core-ai chat message into Mistral's message shape.
// Roles other than system/user/assistant are treated as tool results.
function convertMessage(message) {
  switch (message.role) {
    case "system":
      return { role: "system", content: message.content };
    case "user": {
      // String content passes through; structured content is mapped part by part.
      const content = typeof message.content === "string" ? message.content : message.content.map(convertUserContentPart);
      return { role: "user", content };
    }
    case "assistant": {
      const assistantMessage = { role: "assistant", content: message.content };
      // Only attach toolCalls when there is at least one.
      if (message.toolCalls && message.toolCalls.length > 0) {
        assistantMessage.toolCalls = message.toolCalls.map((toolCall) => ({
          id: toolCall.id,
          type: "function",
          function: {
            name: toolCall.name,
            arguments: toolCall.arguments
          }
        }));
      }
      return assistantMessage;
    }
    default:
      return {
        role: "tool",
        toolCallId: message.toolCallId,
        content: message.content
      };
  }
}
|
|
49
|
+
// Map a user content part (text, image, or document) to Mistral's content chunk shape.
function convertUserContentPart(part) {
  if (part.type === "text") {
    return { type: "text", text: part.text };
  }
  if (part.type === "image") {
    // URL sources pass through; inline sources become a base64 data URL.
    const url = part.source.type === "url"
      ? part.source.url
      : `data:${part.source.mediaType};base64,${part.source.data}`;
    return { type: "image_url", imageUrl: { url } };
  }
  // Any other part is sent as an inline document via a data URL.
  const documentChunk = {
    type: "document_url",
    documentUrl: `data:${part.mimeType};base64,${part.data}`
  };
  if (part.filename) {
    documentChunk.documentName = part.filename;
  }
  return documentChunk;
}
|
|
71
|
+
// Convert the core-ai tool map into Mistral function-tool definitions.
function convertTools(tools) {
  const definitions = [];
  for (const tool of Object.values(tools)) {
    definitions.push({
      type: "function",
      function: {
        name: tool.name,
        description: tool.description,
        // Tool parameters arrive as zod schemas; Mistral expects JSON Schema.
        parameters: zodToJsonSchema(tool.parameters)
      }
    });
  }
  return definitions;
}
|
|
81
|
+
// Map a core-ai tool choice (mode string or a specific tool) to Mistral's format.
function convertToolChoice(choice) {
  return typeof choice === "string"
    ? choice
    : { type: "function", function: { name: choice.toolName } };
}
|
|
92
|
+
// Build a non-streaming chat.complete request body from the model id and call options.
// Only explicitly-provided config values are included in the request.
function createGenerateRequest(modelId, options) {
  const config = options.config ?? {};
  const request = {
    model: modelId,
    messages: convertMessages(options.messages)
  };
  if (options.tools && Object.keys(options.tools).length > 0) {
    request.tools = convertTools(options.tools);
  }
  if (options.toolChoice) {
    request.toolChoice = convertToolChoice(options.toolChoice);
  }
  if (config.temperature !== void 0) {
    request.temperature = config.temperature;
  }
  if (config.maxTokens !== void 0) {
    request.maxTokens = config.maxTokens;
  }
  if (config.topP !== void 0) {
    request.topP = config.topP;
  }
  if (config.stopSequences) {
    request.stop = config.stopSequences;
  }
  if (config.frequencyPenalty !== void 0) {
    request.frequencyPenalty = config.frequencyPenalty;
  }
  if (config.presencePenalty !== void 0) {
    request.presencePenalty = config.presencePenalty;
  }
  // Provider-specific options are spread last so they can override anything above.
  return options.providerOptions ? { ...request, ...options.providerOptions } : request;
}
|
|
110
|
+
// Build a streaming chat request body; identical to the generate request plus stream: true.
function createStreamRequest(modelId, options) {
  const config = options.config ?? {};
  const request = {
    model: modelId,
    messages: convertMessages(options.messages),
    stream: true
  };
  if (options.tools && Object.keys(options.tools).length > 0) {
    request.tools = convertTools(options.tools);
  }
  if (options.toolChoice) {
    request.toolChoice = convertToolChoice(options.toolChoice);
  }
  if (config.temperature !== void 0) {
    request.temperature = config.temperature;
  }
  if (config.maxTokens !== void 0) {
    request.maxTokens = config.maxTokens;
  }
  if (config.topP !== void 0) {
    request.topP = config.topP;
  }
  if (config.stopSequences) {
    request.stop = config.stopSequences;
  }
  if (config.frequencyPenalty !== void 0) {
    request.frequencyPenalty = config.frequencyPenalty;
  }
  if (config.presencePenalty !== void 0) {
    request.presencePenalty = config.presencePenalty;
  }
  // Provider-specific options are spread last so they can override anything above.
  return options.providerOptions ? { ...request, ...options.providerOptions } : request;
}
|
|
129
|
+
// Normalize a Mistral chat.complete response into the core-ai generate result.
function mapGenerateResponse(response) {
  const usage = mapUsage(response.usage);
  const choice = response.choices[0];
  if (!choice) {
    // Defensive: an empty choices array yields an empty result.
    return { content: null, toolCalls: [], finishReason: "unknown", usage };
  }
  const toolCalls = parseToolCalls(choice.message.toolCalls);
  let finishReason = mapFinishReason(choice.finishReason);
  // When the model produced tool calls, report "tool-calls" unless content was filtered.
  if (toolCalls.length > 0 && finishReason !== "content-filter") {
    finishReason = "tool-calls";
  }
  return {
    content: extractTextContent(choice.message.content),
    toolCalls,
    finishReason,
    usage
  };
}
|
|
148
|
+
// Translate Mistral's raw completion-event stream into core-ai stream events.
// Emits content-delta, tool-call-start, tool-call-delta, tool-call-end, and a
// trailing finish event carrying the final finish reason and usage counters.
async function* transformStream(stream) {
  // Partial tool calls accumulated across chunks, keyed by stream index.
  const bufferedToolCalls = /* @__PURE__ */ new Map();
  // Stream indexes already flushed as tool-call-end, so re-flushing is a no-op.
  const emittedToolCalls = /* @__PURE__ */ new Set();
  let finishReason = "unknown";
  let usage = {
    inputTokens: 0,
    outputTokens: 0,
    reasoningTokens: 0,
    totalTokens: 0
  };
  for await (const event of stream) {
    const chunk = event.data;
    // Usage typically arrives on a late chunk; keep the most recent value seen.
    if (chunk.usage) {
      usage = mapUsage(chunk.usage);
    }
    const choice = chunk.choices[0];
    if (!choice) {
      continue;
    }
    // Forward any text fragments in this chunk as content deltas.
    for (const textDelta of extractTextDeltas(choice.delta.content)) {
      yield {
        type: "content-delta",
        text: textDelta
      };
    }
    if (choice.delta.toolCalls) {
      for (const [position, partialToolCall] of choice.delta.toolCalls.entries()) {
        // Fall back to array position when the provider omits an explicit index.
        const streamIndex = partialToolCall.index ?? position;
        const current = bufferedToolCalls.get(streamIndex) ?? {
          id: partialToolCall.id ?? `tool-${streamIndex}`,
          name: partialToolCall.function.name,
          arguments: ""
        };
        const isNew = !bufferedToolCalls.has(streamIndex);
        // Later fragments may carry the authoritative id/name; overwrite when present.
        if (partialToolCall.id) {
          current.id = partialToolCall.id;
        }
        if (partialToolCall.function.name) {
          current.name = partialToolCall.function.name;
        }
        const argumentDelta = partialToolCall.function.arguments;
        if (typeof argumentDelta === "string") {
          // String fragments are concatenated into the buffered arguments.
          current.arguments += argumentDelta;
          yield {
            type: "tool-call-delta",
            toolCallId: current.id,
            argumentsDelta: argumentDelta
          };
        } else {
          // Object arguments replace (not append to) the buffered arguments;
          // an empty object is suppressed entirely.
          const serializedArguments = serializeJsonObject(argumentDelta);
          if (serializedArguments.length > 0) {
            current.arguments = serializedArguments;
            yield {
              type: "tool-call-delta",
              toolCallId: current.id,
              argumentsDelta: serializedArguments
            };
          }
        }
        bufferedToolCalls.set(streamIndex, current);
        // NOTE(review): for a brand-new tool call the delta above is yielded before
        // this start event — confirm consumers tolerate that ordering.
        if (isNew) {
          yield {
            type: "tool-call-start",
            toolCallId: current.id,
            toolName: current.name
          };
        }
      }
    }
    if (choice.finishReason) {
      finishReason = mapFinishReason(choice.finishReason);
    }
    // Once the provider signals tool-calls completion, flush the buffered calls.
    if (finishReason === "tool-calls") {
      yield* emitBufferedToolCalls(bufferedToolCalls, emittedToolCalls);
    }
  }
  // Flush anything still buffered (no-op for calls already emitted above).
  yield* emitBufferedToolCalls(bufferedToolCalls, emittedToolCalls);
  yield {
    type: "finish",
    finishReason,
    usage
  };
}
|
|
231
|
+
// Flush buffered tool calls as "tool-call-end" events, skipping any already emitted.
// Marks each flushed index in emittedToolCalls so repeat flushes are no-ops.
function* emitBufferedToolCalls(bufferedToolCalls, emittedToolCalls) {
  for (const [index, buffered] of bufferedToolCalls.entries()) {
    if (!emittedToolCalls.has(index)) {
      emittedToolCalls.add(index);
      yield {
        type: "tool-call-end",
        toolCall: {
          id: buffered.id,
          name: buffered.name,
          arguments: safeParseJsonObject(buffered.arguments)
        }
      };
    }
  }
}
|
|
247
|
+
// Normalize Mistral tool calls into { id, name, arguments } records.
// A missing id falls back to a positional "tool-N" placeholder.
function parseToolCalls(calls) {
  const parsed = [];
  (calls ?? []).forEach((toolCall, index) => {
    parsed.push({
      id: toolCall.id ?? `tool-${index}`,
      name: toolCall.function.name,
      arguments: toObject(toolCall.function.arguments)
    });
  });
  return parsed;
}
|
|
257
|
+
// Translate Mistral finish-reason strings into the core-ai vocabulary.
function mapFinishReason(reason) {
  switch (reason) {
    case "stop":
      return "stop";
    case "length":
    case "model_length":
      return "length";
    case "tool_calls":
      return "tool-calls";
    default:
      return "unknown";
  }
}
|
|
269
|
+
// Map Mistral token usage to the core-ai usage shape; missing counters become 0.
function mapUsage(usage) {
  const inputTokens = usage?.promptTokens ?? 0;
  const outputTokens = usage?.completionTokens ?? 0;
  const totalTokens = usage?.totalTokens ?? 0;
  // Mistral does not report a separate reasoning-token counter here.
  return { inputTokens, outputTokens, reasoningTokens: 0, totalTokens };
}
|
|
277
|
+
// Collapse Mistral message content (string or chunk array) into plain text.
// Returns null when there is no content or no text chunks.
function extractTextContent(content) {
  if (typeof content === "string") {
    return content;
  }
  if (!content || content.length === 0) {
    return null;
  }
  let text = "";
  for (const chunk of content) {
    if (chunk.type === "text") {
      text += chunk.text;
    }
  }
  return text.length > 0 ? text : null;
}
|
|
287
|
+
// Pull the text fragments out of a streaming delta's content field.
// A string delta is returned as a single-element list; non-text chunks are dropped.
function extractTextDeltas(content) {
  if (typeof content === "string") {
    return [content];
  }
  const deltas = [];
  for (const chunk of content ?? []) {
    if (chunk.type === "text") {
      deltas.push(chunk.text);
    }
  }
  return deltas;
}
|
|
296
|
+
// Serialize a value to JSON via asObject; empty/non-object values become "".
function serializeJsonObject(value) {
  const objectValue = asObject(value);
  if (Object.keys(objectValue).length === 0) {
    return "";
  }
  return JSON.stringify(objectValue);
}
|
|
300
|
+
// Coerce tool-call arguments (JSON string or object) into a plain object.
function toObject(value) {
  return typeof value === "string" ? safeParseJsonObject(value) : asObject(value);
}
|
|
306
|
+
// Parse a JSON string into a plain object; malformed or non-object input yields {}.
function safeParseJsonObject(json) {
  try {
    return asObject(JSON.parse(json));
  } catch {
    return {};
  }
}
|
|
314
|
+
// Return the value itself when it is a plain (non-null, non-array) object, else {}.
function asObject(value) {
  const isPlainObject = value != null && typeof value === "object" && !Array.isArray(value);
  return isPlainObject ? value : {};
}
|
|
320
|
+
// Wrap any thrown value in a core-ai ProviderError tagged with the "mistral" provider.
// MistralError instances contribute their HTTP status code; other errors carry none.
function wrapError(error) {
  if (error instanceof MistralError) {
    return new ProviderError(error.message, "mistral", error.statusCode, error);
  }
  const message = error instanceof Error ? error.message : String(error);
  return new ProviderError(message, "mistral", void 0, error);
}
|
|
331
|
+
|
|
332
|
+
// src/chat-model.ts
|
|
333
|
+
// Create a core-ai ChatModel backed by the given Mistral client and model id.
// All provider errors are rewrapped via wrapError before propagating.
function createMistralChatModel(client, modelId) {
  const generate = async (options) => {
    try {
      const request = createGenerateRequest(modelId, options);
      return mapGenerateResponse(await client.chat.complete(request));
    } catch (error) {
      throw wrapError(error);
    }
  };
  const stream = async (options) => {
    try {
      const eventStream = await client.chat.stream(createStreamRequest(modelId, options));
      return createStreamResult(transformStream(eventStream));
    } catch (error) {
      throw wrapError(error);
    }
  };
  return { provider: "mistral", modelId, generate, stream };
}
|
|
359
|
+
|
|
360
|
+
// src/embedding-model.ts
|
|
361
|
+
import { MistralError as MistralError2 } from "@mistralai/mistralai/models/errors";
|
|
362
|
+
import { ProviderError as ProviderError2 } from "@core-ai/core-ai";
|
|
363
|
+
// Create a core-ai EmbeddingModel backed by the given Mistral client and model id.
function createMistralEmbeddingModel(client, modelId) {
  return {
    provider: "mistral",
    modelId,
    async embed(options) {
      try {
        const request = {
          model: modelId,
          inputs: options.input
        };
        if (options.dimensions !== void 0) {
          request.outputDimension = options.dimensions;
        }
        // Provider-specific options are applied last so they can override the base request.
        const finalRequest = options.providerOptions ? { ...request, ...options.providerOptions } : request;
        const response = await client.embeddings.create(finalRequest);
        // Re-sort by index (without mutating the response) so embeddings align with inputs.
        const ordered = [...response.data].sort((a, b) => (a.index ?? 0) - (b.index ?? 0));
        return {
          embeddings: ordered.map((item) => item.embedding ?? []),
          usage: {
            inputTokens: response.usage.promptTokens ?? 0
          }
        };
      } catch (error) {
        throw wrapError2(error);
      }
    }
  };
}
|
|
391
|
+
// Wrap any thrown value in a core-ai ProviderError for the embedding path.
// MistralError instances contribute their HTTP status code; other errors carry none.
function wrapError2(error) {
  if (error instanceof MistralError2) {
    return new ProviderError2(error.message, "mistral", error.statusCode, error);
  }
  const message = error instanceof Error ? error.message : String(error);
  return new ProviderError2(message, "mistral", void 0, error);
}
|
|
402
|
+
|
|
403
|
+
// src/provider.ts
|
|
404
|
+
// src/provider.ts
// Build a Mistral provider. Uses the supplied client when given, otherwise
// constructs one from apiKey / baseURL.
function createMistral(options = {}) {
  let client = options.client;
  if (!client) {
    const clientOptions = { apiKey: options.apiKey };
    if (options.baseURL) {
      clientOptions.serverURL = options.baseURL;
    }
    client = new Mistral(clientOptions);
  }
  return {
    chatModel(modelId) {
      return createMistralChatModel(client, modelId);
    },
    embeddingModel(modelId) {
      return createMistralEmbeddingModel(client, modelId);
    }
  };
}
|
|
414
|
+
export {
|
|
415
|
+
createMistral
|
|
416
|
+
};
|
package/package.json
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@core-ai/mistral",
|
|
3
|
+
"version": "0.2.0",
|
|
4
|
+
"description": "Mistral provider package for @core-ai/core-ai",
|
|
5
|
+
"license": "MIT",
|
|
6
|
+
"author": "Omnifact (https://omnifact.ai)",
|
|
7
|
+
"repository": {
|
|
8
|
+
"type": "git",
|
|
9
|
+
"url": "git+https://github.com/agdevhq/ai-core.git",
|
|
10
|
+
"directory": "packages/mistral"
|
|
11
|
+
},
|
|
12
|
+
"keywords": [
|
|
13
|
+
"llm",
|
|
14
|
+
"ai",
|
|
15
|
+
"mistral",
|
|
16
|
+
"provider",
|
|
17
|
+
"sdk"
|
|
18
|
+
],
|
|
19
|
+
"type": "module",
|
|
20
|
+
"main": "./dist/index.js",
|
|
21
|
+
"types": "./dist/index.d.ts",
|
|
22
|
+
"exports": {
|
|
23
|
+
".": {
|
|
24
|
+
"types": "./dist/index.d.ts",
|
|
25
|
+
"import": "./dist/index.js"
|
|
26
|
+
}
|
|
27
|
+
},
|
|
28
|
+
"files": [
|
|
29
|
+
"dist",
|
|
30
|
+
"README.md",
|
|
31
|
+
"LICENSE"
|
|
32
|
+
],
|
|
33
|
+
"publishConfig": {
|
|
34
|
+
"access": "public"
|
|
35
|
+
},
|
|
36
|
+
"scripts": {
|
|
37
|
+
"build": "tsup",
|
|
38
|
+
"lint": "eslint src/ --max-warnings 0",
|
|
39
|
+
"check-types": "tsc --noEmit",
|
|
40
|
+
"prepublishOnly": "npm run build",
|
|
41
|
+
"test": "vitest run",
|
|
42
|
+
"test:watch": "vitest"
|
|
43
|
+
},
|
|
44
|
+
"dependencies": {
|
|
45
|
+
"@core-ai/core-ai": "^0.2.0",
|
|
46
|
+
"@mistralai/mistralai": "^1.14.0",
|
|
47
|
+
"zod-to-json-schema": "^3.25.1"
|
|
48
|
+
},
|
|
49
|
+
"peerDependencies": {
|
|
50
|
+
"zod": "^3.25.76"
|
|
51
|
+
},
|
|
52
|
+
"devDependencies": {
|
|
53
|
+
"@core-ai/eslint-config": "^0.0.0",
|
|
54
|
+
"@core-ai/typescript-config": "^0.0.0",
|
|
55
|
+
"typescript": "^5.7.3",
|
|
56
|
+
"vitest": "^3.2.4"
|
|
57
|
+
}
|
|
58
|
+
}
|