modelfusion 0.94.0 → 0.96.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/README.md +3 -3
  2. package/core/api/postToApi.cjs +30 -1
  3. package/core/api/postToApi.d.ts +7 -1
  4. package/core/api/postToApi.js +29 -1
  5. package/model-provider/index.cjs +1 -0
  6. package/model-provider/index.d.ts +1 -0
  7. package/model-provider/index.js +1 -0
  8. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +3 -1
  9. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +4 -0
  10. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +3 -1
  11. package/model-provider/mistral/MistralApiConfiguration.cjs +22 -0
  12. package/model-provider/mistral/MistralApiConfiguration.d.ts +12 -0
  13. package/model-provider/mistral/MistralApiConfiguration.js +18 -0
  14. package/model-provider/mistral/MistralError.cjs +17 -0
  15. package/model-provider/mistral/MistralError.d.ts +13 -0
  16. package/model-provider/mistral/MistralError.js +14 -0
  17. package/model-provider/mistral/MistralFacade.cjs +18 -0
  18. package/model-provider/mistral/MistralFacade.d.ts +6 -0
  19. package/model-provider/mistral/MistralFacade.js +12 -0
  20. package/model-provider/mistral/MistralPromptTemplate.cjs +64 -0
  21. package/model-provider/mistral/MistralPromptTemplate.d.ts +16 -0
  22. package/model-provider/mistral/MistralPromptTemplate.js +58 -0
  23. package/model-provider/mistral/MistralTextEmbeddingModel.cjs +100 -0
  24. package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +106 -0
  25. package/model-provider/mistral/MistralTextEmbeddingModel.js +96 -0
  26. package/model-provider/mistral/MistralTextGenerationModel.cjs +254 -0
  27. package/model-provider/mistral/MistralTextGenerationModel.d.ts +231 -0
  28. package/model-provider/mistral/MistralTextGenerationModel.js +250 -0
  29. package/model-provider/mistral/index.cjs +34 -0
  30. package/model-provider/mistral/index.d.ts +6 -0
  31. package/model-provider/mistral/index.js +5 -0
  32. package/model-provider/ollama/OllamaError.cjs +5 -30
  33. package/model-provider/ollama/OllamaError.js +5 -29
  34. package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +1 -7
  35. package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +0 -1
  36. package/model-provider/ollama/OllamaTextEmbeddingModel.js +1 -7
  37. package/model-provider/openai/OpenAICompletionModel.d.ts +4 -4
  38. package/model-provider/openai/OpenAIError.cjs +9 -34
  39. package/model-provider/openai/OpenAIError.d.ts +1 -3
  40. package/model-provider/openai/OpenAIError.js +9 -33
  41. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +6 -6
  42. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
  43. package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +2 -1
  44. package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +2 -1
  45. package/package.json +1 -1
package/README.md CHANGED
@@ -61,7 +61,7 @@ const text = await generateText(
  );
  ```
 
- Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)
+ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)
 
  #### streamText
 
@@ -78,7 +78,7 @@ for await (const textPart of textStream) {
  }
  ```
 
- Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)
+ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)
 
  #### streamText with multi-modal prompt
 
@@ -299,7 +299,7 @@ const embeddings = await embedMany(
  );
  ```
 
- Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
+ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
 
  ### [Tokenize Text](https://modelfusion.dev/guide/function/tokenize-text)
package/core/api/postToApi.cjs CHANGED
@@ -1,9 +1,38 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.postToApi = exports.postJsonToApi = exports.createAudioMpegResponseHandler = exports.createTextResponseHandler = exports.createJsonResponseHandler = void 0;
+ exports.postToApi = exports.postJsonToApi = exports.createAudioMpegResponseHandler = exports.createTextResponseHandler = exports.createJsonResponseHandler = exports.createJsonErrorResponseHandler = void 0;
  const ZodSchema_js_1 = require("../schema/ZodSchema.cjs");
  const parseJSON_js_1 = require("../schema/parseJSON.cjs");
  const ApiCallError_js_1 = require("./ApiCallError.cjs");
+ const createJsonErrorResponseHandler = ({ errorSchema, errorToMessage, isRetryable, }) => async ({ response, url, requestBodyValues }) => {
+     const responseBody = await response.text();
+     // resilient parsing in case the response is not JSON or does not match the schema:
+     try {
+         const parsedError = (0, parseJSON_js_1.parseJSON)({
+             text: responseBody,
+             schema: errorSchema,
+         });
+         return new ApiCallError_js_1.ApiCallError({
+             message: errorToMessage(parsedError),
+             url,
+             requestBodyValues,
+             statusCode: response.status,
+             responseBody,
+             data: parsedError,
+             isRetryable: isRetryable?.(parsedError, response),
+         });
+     }
+     catch (parseError) {
+         return new ApiCallError_js_1.ApiCallError({
+             message: responseBody.trim() !== "" ? responseBody : response.statusText,
+             url,
+             requestBodyValues,
+             statusCode: response.status,
+             responseBody,
+         });
+     }
+ };
+ exports.createJsonErrorResponseHandler = createJsonErrorResponseHandler;
  const createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
      const responseBody = await response.text();
      const parsedResult = (0, parseJSON_js_1.safeParseJSON)({
package/core/api/postToApi.d.ts CHANGED
@@ -1,12 +1,18 @@
  /// <reference types="node" />
- import { ZodSchema } from "../schema/ZodSchema.js";
  import { z } from "zod";
+ import { Schema } from "../schema/Schema.js";
+ import { ZodSchema } from "../schema/ZodSchema.js";
  import { ApiCallError } from "./ApiCallError.js";
  export type ResponseHandler<T> = (options: {
      url: string;
      requestBodyValues: unknown;
      response: Response;
  }) => PromiseLike<T>;
+ export declare const createJsonErrorResponseHandler: <T>({ errorSchema, errorToMessage, isRetryable, }: {
+     errorSchema: Schema<T>;
+     errorToMessage: (error: T) => string;
+     isRetryable?: ((error: T, response: Response) => boolean) | undefined;
+ }) => ResponseHandler<ApiCallError>;
  export declare const createJsonResponseHandler: <T>(responseSchema: z.ZodType<T, z.ZodTypeDef, T>) => ResponseHandler<T>;
  export declare const createTextResponseHandler: () => ResponseHandler<string>;
  export declare const createAudioMpegResponseHandler: () => ResponseHandler<Buffer>;
package/core/api/postToApi.js CHANGED
@@ -1,6 +1,34 @@
  import { ZodSchema } from "../schema/ZodSchema.js";
- import { safeParseJSON } from "../schema/parseJSON.js";
+ import { parseJSON, safeParseJSON } from "../schema/parseJSON.js";
  import { ApiCallError } from "./ApiCallError.js";
+ export const createJsonErrorResponseHandler = ({ errorSchema, errorToMessage, isRetryable, }) => async ({ response, url, requestBodyValues }) => {
+     const responseBody = await response.text();
+     // resilient parsing in case the response is not JSON or does not match the schema:
+     try {
+         const parsedError = parseJSON({
+             text: responseBody,
+             schema: errorSchema,
+         });
+         return new ApiCallError({
+             message: errorToMessage(parsedError),
+             url,
+             requestBodyValues,
+             statusCode: response.status,
+             responseBody,
+             data: parsedError,
+             isRetryable: isRetryable?.(parsedError, response),
+         });
+     }
+     catch (parseError) {
+         return new ApiCallError({
+             message: responseBody.trim() !== "" ? responseBody : response.statusText,
+             url,
+             requestBodyValues,
+             statusCode: response.status,
+             responseBody,
+         });
+     }
+ };
  export const createJsonResponseHandler = (responseSchema) => async ({ response, url, requestBodyValues }) => {
      const responseBody = await response.text();
      const parsedResult = safeParseJSON({
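The new `createJsonErrorResponseHandler` helper is what the Mistral provider later in this diff uses to turn provider error payloads into `ApiCallError`s. A minimal sketch of how a provider module might use it; the "Acme" provider, its payload shape, and the 429 retry rule are invented for illustration:

```ts
import { z } from "zod";
import { createJsonErrorResponseHandler } from "../../core/api/postToApi.js";
import { ZodSchema } from "../../core/schema/ZodSchema.js";

// Hypothetical error payload shape for an imaginary "Acme" provider:
const acmeErrorDataSchema = new ZodSchema(
  z.object({
    error: z.object({ message: z.string(), code: z.string() }),
  })
);

// Produces a ResponseHandler<ApiCallError> that parses the body against the
// schema and falls back to the raw body (or statusText) when parsing fails.
export const failedAcmeCallResponseHandler = createJsonErrorResponseHandler({
  errorSchema: acmeErrorDataSchema,
  errorToMessage: (error) => error.error.message,
  // Assumption for illustration: treat rate-limit responses as retryable.
  isRetryable: (_error, response) => response.status === 429,
});
```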
package/model-provider/index.cjs CHANGED
@@ -21,6 +21,7 @@ __exportStar(require("./elevenlabs/index.cjs"), exports);
  __exportStar(require("./huggingface/index.cjs"), exports);
  __exportStar(require("./llamacpp/index.cjs"), exports);
  __exportStar(require("./lmnt/index.cjs"), exports);
+ __exportStar(require("./mistral/index.cjs"), exports);
  __exportStar(require("./ollama/index.cjs"), exports);
  __exportStar(require("./openai/index.cjs"), exports);
  __exportStar(require("./openai-compatible/index.cjs"), exports);
package/model-provider/index.d.ts CHANGED
@@ -5,6 +5,7 @@ export * from "./elevenlabs/index.js";
  export * from "./huggingface/index.js";
  export * from "./llamacpp/index.js";
  export * from "./lmnt/index.js";
+ export * from "./mistral/index.js";
  export * from "./ollama/index.js";
  export * from "./openai/index.js";
  export * from "./openai-compatible/index.js";
package/model-provider/index.js CHANGED
@@ -5,6 +5,7 @@ export * from "./elevenlabs/index.js";
  export * from "./huggingface/index.js";
  export * from "./llamacpp/index.js";
  export * from "./lmnt/index.js";
+ export * from "./mistral/index.js";
  export * from "./ollama/index.js";
  export * from "./openai/index.js";
  export * from "./openai-compatible/index.js";
package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs CHANGED
@@ -57,6 +57,7 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
      "maxCompletionTokens",
      "stopSequences",
      "contextWindowSize",
+     "cachePrompt",
      "temperature",
      "topK",
      "topP",
@@ -197,13 +198,14 @@ const llamaCppTextStreamingResponseSchema = new ZodSchema_js_1.ZodSchema(zod_1.z
  }),
  llamaCppTextGenerationResponseSchema,
  ]));
- async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguration_js_1.LlamaCppApiConfiguration(), abortSignal, responseFormat, prompt, temperature, topK, topP, nPredict, nKeep, stop, tfsZ, typicalP, repeatPenalty, repeatLastN, penalizeNl, mirostat, mirostatTau, mirostatEta, seed, ignoreEos, logitBias, }) {
+ async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguration_js_1.LlamaCppApiConfiguration(), abortSignal, responseFormat, prompt, cachePrompt, temperature, topK, topP, nPredict, nKeep, stop, tfsZ, typicalP, repeatPenalty, repeatLastN, penalizeNl, mirostat, mirostatTau, mirostatEta, seed, ignoreEos, logitBias, }) {
      return (0, postToApi_js_1.postJsonToApi)({
          url: api.assembleUrl(`/completion`),
          headers: api.headers,
          body: {
              stream: responseFormat.stream,
              prompt: prompt.text,
+             cache_prompt: cachePrompt,
              temperature,
              top_k: topK,
              top_p: topP,
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts CHANGED
@@ -15,6 +15,10 @@ export interface LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends
     * Llama.cpp server.
     */
    contextWindowSize?: CONTEXT_WINDOW_SIZE;
+   /**
+    * Caches the prompt and generation to avoid reprocessing the entire prompt
+    * when part of it is unchanged between calls (default: false).
+    */
+   cachePrompt?: boolean;
    temperature?: number;
    topK?: number;
    topP?: number;
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js CHANGED
@@ -54,6 +54,7 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
      "maxCompletionTokens",
      "stopSequences",
      "contextWindowSize",
+     "cachePrompt",
      "temperature",
      "topK",
      "topP",
@@ -193,13 +194,14 @@ const llamaCppTextStreamingResponseSchema = new ZodSchema(z.discriminatedUnion("
  }),
  llamaCppTextGenerationResponseSchema,
  ]));
- async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguration(), abortSignal, responseFormat, prompt, temperature, topK, topP, nPredict, nKeep, stop, tfsZ, typicalP, repeatPenalty, repeatLastN, penalizeNl, mirostat, mirostatTau, mirostatEta, seed, ignoreEos, logitBias, }) {
+ async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguration(), abortSignal, responseFormat, prompt, cachePrompt, temperature, topK, topP, nPredict, nKeep, stop, tfsZ, typicalP, repeatPenalty, repeatLastN, penalizeNl, mirostat, mirostatTau, mirostatEta, seed, ignoreEos, logitBias, }) {
      return postJsonToApi({
          url: api.assembleUrl(`/completion`),
          headers: api.headers,
          body: {
              stream: responseFormat.stream,
              prompt: prompt.text,
+             cache_prompt: cachePrompt,
              temperature,
              top_k: topK,
              top_p: topP,
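The new `cachePrompt` setting is passed through to the llama.cpp server as `cache_prompt`, which lets the server reuse its KV cache when the prompt prefix is unchanged between calls. A minimal sketch, assuming the raw prompt shape `{ text: ... }` inferred from the `prompt.text` access above:

```ts
import { generateText, LlamaCppTextGenerationModel } from "modelfusion";

const model = new LlamaCppTextGenerationModel({
  cachePrompt: true, // sent as cache_prompt to the /completion endpoint
});

// Repeated calls that share a prompt prefix can now skip re-evaluating it:
const text = await generateText(model, {
  text: "Write a short story about a robot learning to cook:",
});
```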
package/model-provider/mistral/MistralApiConfiguration.cjs ADDED
@@ -0,0 +1,22 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.MistralApiConfiguration = void 0;
+ const BaseUrlApiConfiguration_js_1 = require("../../core/api/BaseUrlApiConfiguration.cjs");
+ const loadApiKey_js_1 = require("../../core/api/loadApiKey.cjs");
+ class MistralApiConfiguration extends BaseUrlApiConfiguration_js_1.BaseUrlApiConfiguration {
+     constructor({ baseUrl = "https://api.mistral.ai/v1", apiKey, retry, throttle, } = {}) {
+         super({
+             baseUrl,
+             headers: {
+                 Authorization: `Bearer ${(0, loadApiKey_js_1.loadApiKey)({
+                     apiKey,
+                     environmentVariableName: "MISTRAL_API_KEY",
+                     description: "Mistral",
+                 })}`,
+             },
+             retry,
+             throttle,
+         });
+     }
+ }
+ exports.MistralApiConfiguration = MistralApiConfiguration;
package/model-provider/mistral/MistralApiConfiguration.d.ts ADDED
@@ -0,0 +1,12 @@
+ import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
+ import { RetryFunction } from "../../core/api/RetryFunction.js";
+ import { ThrottleFunction } from "../../core/api/ThrottleFunction.js";
+ export type MistralApiConfigurationSettings = {
+     baseUrl?: string;
+     apiKey?: string;
+     retry?: RetryFunction;
+     throttle?: ThrottleFunction;
+ };
+ export declare class MistralApiConfiguration extends BaseUrlApiConfiguration {
+     constructor({ baseUrl, apiKey, retry, throttle, }?: MistralApiConfigurationSettings);
+ }
package/model-provider/mistral/MistralApiConfiguration.js ADDED
@@ -0,0 +1,18 @@
+ import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
+ import { loadApiKey } from "../../core/api/loadApiKey.js";
+ export class MistralApiConfiguration extends BaseUrlApiConfiguration {
+     constructor({ baseUrl = "https://api.mistral.ai/v1", apiKey, retry, throttle, } = {}) {
+         super({
+             baseUrl,
+             headers: {
+                 Authorization: `Bearer ${loadApiKey({
+                     apiKey,
+                     environmentVariableName: "MISTRAL_API_KEY",
+                     description: "Mistral",
+                 })}`,
+             },
+             retry,
+             throttle,
+         });
+     }
+ }
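For reference, a sketch of constructing the new configuration; it assumes `MistralApiConfiguration` is re-exported from the package root via the `model-provider` index changes above:

```ts
import { MistralApiConfiguration } from "modelfusion";

// Explicit key shown with a placeholder; if apiKey is omitted, loadApiKey
// falls back to the MISTRAL_API_KEY environment variable.
const api = new MistralApiConfiguration({
  apiKey: "my-api-key-placeholder",
});
```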
package/model-provider/mistral/MistralError.cjs ADDED
@@ -0,0 +1,17 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.failedMistralCallResponseHandler = void 0;
+ const zod_1 = require("zod");
+ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
+ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
+ const mistralErrorDataSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
+     object: zod_1.z.literal("error"),
+     message: zod_1.z.string(),
+     type: zod_1.z.string(),
+     param: zod_1.z.string().nullable(),
+     code: zod_1.z.string(),
+ }));
+ exports.failedMistralCallResponseHandler = (0, postToApi_js_1.createJsonErrorResponseHandler)({
+     errorSchema: mistralErrorDataSchema,
+     errorToMessage: (error) => error.message,
+ });
package/model-provider/mistral/MistralError.d.ts ADDED
@@ -0,0 +1,13 @@
+ import { ApiCallError } from "../../core/api/ApiCallError.js";
+ import { ResponseHandler } from "../../core/api/postToApi.js";
+ import { ZodSchema } from "../../core/schema/ZodSchema.js";
+ declare const mistralErrorDataSchema: ZodSchema<{
+     object: "error";
+     message: string;
+     code: string;
+     type: string;
+     param: string | null;
+ }>;
+ export type MistralErrorData = (typeof mistralErrorDataSchema)["_type"];
+ export declare const failedMistralCallResponseHandler: ResponseHandler<ApiCallError>;
+ export {};
package/model-provider/mistral/MistralError.js ADDED
@@ -0,0 +1,14 @@
+ import { z } from "zod";
+ import { createJsonErrorResponseHandler, } from "../../core/api/postToApi.js";
+ import { ZodSchema } from "../../core/schema/ZodSchema.js";
+ const mistralErrorDataSchema = new ZodSchema(z.object({
+     object: z.literal("error"),
+     message: z.string(),
+     type: z.string(),
+     param: z.string().nullable(),
+     code: z.string(),
+ }));
+ export const failedMistralCallResponseHandler = createJsonErrorResponseHandler({
+     errorSchema: mistralErrorDataSchema,
+     errorToMessage: (error) => error.message,
+ });
package/model-provider/mistral/MistralFacade.cjs ADDED
@@ -0,0 +1,18 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.TextEmbedder = exports.TextGenerator = exports.Api = void 0;
+ const MistralApiConfiguration_js_1 = require("./MistralApiConfiguration.cjs");
+ const MistralTextEmbeddingModel_js_1 = require("./MistralTextEmbeddingModel.cjs");
+ const MistralTextGenerationModel_js_1 = require("./MistralTextGenerationModel.cjs");
+ function Api(settings) {
+     return new MistralApiConfiguration_js_1.MistralApiConfiguration(settings);
+ }
+ exports.Api = Api;
+ function TextGenerator(settings) {
+     return new MistralTextGenerationModel_js_1.MistralTextGenerationModel(settings);
+ }
+ exports.TextGenerator = TextGenerator;
+ function TextEmbedder(settings) {
+     return new MistralTextEmbeddingModel_js_1.MistralTextEmbeddingModel(settings);
+ }
+ exports.TextEmbedder = TextEmbedder;
package/model-provider/mistral/MistralFacade.d.ts ADDED
@@ -0,0 +1,6 @@
+ import { MistralApiConfiguration, MistralApiConfigurationSettings } from "./MistralApiConfiguration.js";
+ import { MistralTextEmbeddingModel, MistralTextEmbeddingModelSettings } from "./MistralTextEmbeddingModel.js";
+ import { MistralTextGenerationModel, MistralTextGenerationModelSettings } from "./MistralTextGenerationModel.js";
+ export declare function Api(settings: MistralApiConfigurationSettings): MistralApiConfiguration;
+ export declare function TextGenerator(settings: MistralTextGenerationModelSettings): MistralTextGenerationModel;
+ export declare function TextEmbedder(settings: MistralTextEmbeddingModelSettings): MistralTextEmbeddingModel;
package/model-provider/mistral/MistralFacade.js ADDED
@@ -0,0 +1,12 @@
+ import { MistralApiConfiguration, } from "./MistralApiConfiguration.js";
+ import { MistralTextEmbeddingModel, } from "./MistralTextEmbeddingModel.js";
+ import { MistralTextGenerationModel, } from "./MistralTextGenerationModel.js";
+ export function Api(settings) {
+     return new MistralApiConfiguration(settings);
+ }
+ export function TextGenerator(settings) {
+     return new MistralTextGenerationModel(settings);
+ }
+ export function TextEmbedder(settings) {
+     return new MistralTextEmbeddingModel(settings);
+ }
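Taken together, the facade gives the usual ModelFusion entry point for the provider. A hedged usage sketch: the `mistral` namespace import and the `"mistral-tiny"` model id are assumptions not confirmed by this diff, and the message-array prompt shape follows `MistralPromptTemplate` below:

```ts
import { generateText, mistral } from "modelfusion";

// TextGenerator(settings) wraps new MistralTextGenerationModel(settings).
const model = mistral.TextGenerator({
  model: "mistral-tiny", // assumed model id
});

const text = await generateText(model, [
  { role: "user", content: "Write a haiku about package diffs." },
]);
```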
package/model-provider/mistral/MistralPromptTemplate.cjs ADDED
@@ -0,0 +1,64 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.chat = exports.instruction = exports.text = void 0;
+ const ChatPrompt_js_1 = require("../../model-function/generate-text/prompt-template/ChatPrompt.cjs");
+ /**
+  * Formats a text prompt as a Mistral prompt.
+  */
+ function text() {
+     return {
+         format: (prompt) => [{ role: "user", content: prompt }],
+         stopSequences: [],
+     };
+ }
+ exports.text = text;
+ /**
+  * Formats an instruction prompt as a Mistral prompt.
+  */
+ function instruction() {
+     return {
+         format(prompt) {
+             const messages = [];
+             if (prompt.system != null) {
+                 messages.push({ role: "system", content: prompt.system });
+             }
+             messages.push({ role: "user", content: prompt.instruction });
+             return messages;
+         },
+         stopSequences: [],
+     };
+ }
+ exports.instruction = instruction;
+ /**
+  * Formats a chat prompt as a Mistral prompt.
+  */
+ function chat() {
+     return {
+         format(prompt) {
+             (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
+             const messages = [];
+             if (prompt.system != null) {
+                 messages.push({ role: "system", content: prompt.system });
+             }
+             for (const { role, content } of prompt.messages) {
+                 switch (role) {
+                     case "user": {
+                         messages.push({ role: "user", content });
+                         break;
+                     }
+                     case "assistant": {
+                         messages.push({ role: "assistant", content });
+                         break;
+                     }
+                     default: {
+                         const _exhaustiveCheck = role;
+                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                     }
+                 }
+             }
+             return messages;
+         },
+         stopSequences: [],
+     };
+ }
+ exports.chat = chat;
package/model-provider/mistral/MistralPromptTemplate.d.ts ADDED
@@ -0,0 +1,16 @@
+ import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+ import { TextChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+ import { TextInstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
+ import { MistralTextGenerationPrompt } from "./MistralTextGenerationModel.js";
+ /**
+  * Formats a text prompt as a Mistral prompt.
+  */
+ export declare function text(): TextGenerationPromptTemplate<string, MistralTextGenerationPrompt>;
+ /**
+  * Formats an instruction prompt as a Mistral prompt.
+  */
+ export declare function instruction(): TextGenerationPromptTemplate<TextInstructionPrompt, MistralTextGenerationPrompt>;
+ /**
+  * Formats a chat prompt as a Mistral prompt.
+  */
+ export declare function chat(): TextGenerationPromptTemplate<TextChatPrompt, MistralTextGenerationPrompt>;
package/model-provider/mistral/MistralPromptTemplate.js ADDED
@@ -0,0 +1,58 @@
+ import { validateChatPrompt, } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+ /**
+  * Formats a text prompt as a Mistral prompt.
+  */
+ export function text() {
+     return {
+         format: (prompt) => [{ role: "user", content: prompt }],
+         stopSequences: [],
+     };
+ }
+ /**
+  * Formats an instruction prompt as a Mistral prompt.
+  */
+ export function instruction() {
+     return {
+         format(prompt) {
+             const messages = [];
+             if (prompt.system != null) {
+                 messages.push({ role: "system", content: prompt.system });
+             }
+             messages.push({ role: "user", content: prompt.instruction });
+             return messages;
+         },
+         stopSequences: [],
+     };
+ }
+ /**
+  * Formats a chat prompt as a Mistral prompt.
+  */
+ export function chat() {
+     return {
+         format(prompt) {
+             validateChatPrompt(prompt);
+             const messages = [];
+             if (prompt.system != null) {
+                 messages.push({ role: "system", content: prompt.system });
+             }
+             for (const { role, content } of prompt.messages) {
+                 switch (role) {
+                     case "user": {
+                         messages.push({ role: "user", content });
+                         break;
+                     }
+                     case "assistant": {
+                         messages.push({ role: "assistant", content });
+                         break;
+                     }
+                     default: {
+                         const _exhaustiveCheck = role;
+                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                     }
+                 }
+             }
+             return messages;
+         },
+         stopSequences: [],
+     };
+ }
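Since the templates are plain functions, their behavior can be shown directly. A small sketch of `chat()`; the import path is relative to this module, and the `TextChatPrompt` shape is taken from the code above:

```ts
import { chat } from "./MistralPromptTemplate.js";

const messages = chat().format({
  system: "You are a helpful assistant.",
  messages: [
    { role: "user", content: "Hello!" },
    { role: "assistant", content: "Hi, how can I help?" },
    { role: "user", content: "Summarize this changelog." },
  ],
});
// => [
//   { role: "system", content: "You are a helpful assistant." },
//   { role: "user", content: "Hello!" },
//   { role: "assistant", content: "Hi, how can I help?" },
//   { role: "user", content: "Summarize this changelog." },
// ]
```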
package/model-provider/mistral/MistralTextEmbeddingModel.cjs ADDED
@@ -0,0 +1,100 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.MistralTextEmbeddingModel = void 0;
+ const zod_1 = require("zod");
+ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
+ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
+ const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+ const MistralApiConfiguration_js_1 = require("./MistralApiConfiguration.cjs");
+ const MistralError_js_1 = require("./MistralError.cjs");
+ class MistralTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
+     constructor(settings) {
+         super({ settings });
+         Object.defineProperty(this, "provider", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "mistral"
+         });
+         Object.defineProperty(this, "maxValuesPerCall", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 32
+         });
+         /**
+          * Parallel calls are technically possible, but I have been hitting rate limits and disabled
+          * them for now.
+          */
+         Object.defineProperty(this, "isParallelizable", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: false
+         });
+         Object.defineProperty(this, "embeddingDimensions", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 1024
+         });
+     }
+     get modelName() {
+         return this.settings.model;
+     }
+     async callAPI(texts, options) {
+         if (texts.length > this.maxValuesPerCall) {
+             throw new Error(`The Mistral embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
+         }
+         const api = this.settings.api ?? new MistralApiConfiguration_js_1.MistralApiConfiguration();
+         const abortSignal = options?.run?.abortSignal;
+         const model = this.settings.model;
+         const encodingFormat = this.settings.encodingFormat ?? "float";
+         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
+             retry: this.settings.api?.retry,
+             throttle: this.settings.api?.throttle,
+             call: async () => (0, postToApi_js_1.postJsonToApi)({
+                 url: api.assembleUrl(`/embeddings`),
+                 headers: api.headers,
+                 body: {
+                     model,
+                     input: texts,
+                     encoding_format: encodingFormat,
+                 },
+                 failedResponseHandler: MistralError_js_1.failedMistralCallResponseHandler,
+                 successfulResponseHandler: (0, postToApi_js_1.createJsonResponseHandler)(MistralTextEmbeddingResponseSchema),
+                 abortSignal,
+             }),
+         });
+     }
+     get settingsForEvent() {
+         return {
+             encodingFormat: this.settings.encodingFormat,
+         };
+     }
+     async doEmbedValues(texts, options) {
+         const response = await this.callAPI(texts, options);
+         return {
+             response,
+             embeddings: response.data.map((entry) => entry.embedding),
+         };
+     }
+     withSettings(additionalSettings) {
+         return new MistralTextEmbeddingModel(Object.assign({}, this.settings, additionalSettings));
+     }
+ }
+ exports.MistralTextEmbeddingModel = MistralTextEmbeddingModel;
+ const MistralTextEmbeddingResponseSchema = zod_1.z.object({
+     id: zod_1.z.string(),
+     object: zod_1.z.string(),
+     data: zod_1.z.array(zod_1.z.object({
+         object: zod_1.z.string(),
+         embedding: zod_1.z.array(zod_1.z.number()),
+         index: zod_1.z.number(),
+     })),
+     model: zod_1.z.string(),
+     usage: zod_1.z.object({
+         prompt_tokens: zod_1.z.number(),
+         total_tokens: zod_1.z.number(),
+     }),
+ });