modelfusion 0.99.0 → 0.100.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/README.md +9 -15
  2. package/guard/fixStructure.cjs +3 -3
  3. package/guard/fixStructure.d.ts +3 -3
  4. package/guard/fixStructure.js +3 -3
  5. package/model-function/generate-structure/generateStructure.d.ts +2 -2
  6. package/model-function/generate-structure/streamStructure.d.ts +1 -1
  7. package/model-provider/mistral/{MistralTextGenerationModel.cjs → MistralChatModel.cjs} +13 -13
  8. package/model-provider/mistral/{MistralTextGenerationModel.d.ts → MistralChatModel.d.ts} +21 -20
  9. package/model-provider/mistral/{MistralTextGenerationModel.js → MistralChatModel.js} +11 -11
  10. package/model-provider/mistral/MistralFacade.cjs +5 -5
  11. package/model-provider/mistral/MistralFacade.d.ts +3 -2
  12. package/model-provider/mistral/MistralFacade.js +3 -3
  13. package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
  14. package/model-provider/mistral/index.cjs +1 -1
  15. package/model-provider/mistral/index.d.ts +1 -1
  16. package/model-provider/mistral/index.js +1 -1
  17. package/model-provider/ollama/OllamaApiConfiguration.d.ts +6 -5
  18. package/model-provider/ollama/OllamaChatModel.cjs +303 -0
  19. package/model-provider/ollama/OllamaChatModel.d.ts +171 -0
  20. package/model-provider/ollama/OllamaChatModel.js +299 -0
  21. package/model-provider/ollama/OllamaChatPromptTemplate.cjs +76 -0
  22. package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +20 -0
  23. package/model-provider/ollama/OllamaChatPromptTemplate.js +69 -0
  24. package/model-provider/ollama/{OllamaTextGenerationModel.cjs → OllamaCompletionModel.cjs} +13 -11
  25. package/model-provider/ollama/OllamaCompletionModel.d.ts +159 -0
  26. package/model-provider/ollama/{OllamaTextGenerationModel.js → OllamaCompletionModel.js} +11 -9
  27. package/model-provider/ollama/{OllamaTextGenerationModel.test.cjs → OllamaCompletionModel.test.cjs} +3 -3
  28. package/model-provider/ollama/{OllamaTextGenerationModel.test.js → OllamaCompletionModel.test.js} +3 -3
  29. package/model-provider/ollama/OllamaFacade.cjs +15 -5
  30. package/model-provider/ollama/OllamaFacade.d.ts +7 -2
  31. package/model-provider/ollama/OllamaFacade.js +11 -3
  32. package/model-provider/ollama/OllamaTextGenerationSettings.cjs +2 -0
  33. package/model-provider/ollama/OllamaTextGenerationSettings.d.ts +87 -0
  34. package/model-provider/ollama/OllamaTextGenerationSettings.js +1 -0
  35. package/model-provider/ollama/index.cjs +4 -1
  36. package/model-provider/ollama/index.d.ts +4 -1
  37. package/model-provider/ollama/index.js +4 -1
  38. package/model-provider/openai/OpenAIFacade.cjs +4 -2
  39. package/model-provider/openai/OpenAIFacade.d.ts +3 -1
  40. package/model-provider/openai/OpenAIFacade.js +2 -1
  41. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +1 -1
  42. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +3 -3
  43. package/model-provider/openai/chat/OpenAIChatModel.cjs +1 -1
  44. package/model-provider/openai/chat/OpenAIChatModel.d.ts +2 -2
  45. package/model-provider/openai/chat/OpenAIChatModel.js +1 -1
  46. package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +5 -5
  47. package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +1 -1
  48. package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +1 -1
  49. package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +1 -1
  50. package/model-provider/openai-compatible/OpenAICompatibleFacade.js +1 -1
  51. package/package.json +1 -1
  52. package/model-provider/ollama/OllamaTextGenerationModel.d.ts +0 -230
  53. package/model-provider/ollama/{OllamaTextGenerationModel.test.d.ts → OllamaCompletionModel.test.d.ts} +0 -0
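
Most of this release is a naming pass over the provider APIs: the Mistral and Ollama text generation models become chat/completion models, and the standalone `OpenAIChatMessage` helper moves onto the provider facade as `openai.ChatMessage`. A minimal migration sketch, assuming the provider facades are re-exported from the package root as the README examples below suggest:

```ts
import { openai, mistral } from "modelfusion";

// Chat-message helpers now live on the provider facade:
const messages = [openai.ChatMessage.user("What's fourteen times twelve?")]; // was: OpenAIChatMessage.user(...)

// The Mistral facade constructor was renamed:
const model = mistral.ChatTextGenerator({ model: "mistral-tiny" }); // was: mistral.TextGenerator({ ... })
```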
package/README.md CHANGED
@@ -85,7 +85,7 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
  Multi-modal vision models such as GPT 4 Vision can process images as part of the prompt.

  ```ts
- import { streamText, openai, OpenAIChatMessage } from "modelfusion";
+ import { streamText, openai } from "modelfusion";
  import { readFileSync } from "fs";

  const image = readFileSync("./image.png").toString("base64");
@@ -93,7 +93,7 @@ const image = readFileSync("./image.png").toString("base64");
  const textStream = await streamText(
  openai.ChatTextGenerator({ model: "gpt-4-vision-preview" }),
  [
- OpenAIChatMessage.user([
+ openai.ChatMessage.user([
  { type: "text", text: "Describe the image in detail:" },
  { type: "image", base64Image: image, mimeType: "image/png" },
  ]),
@@ -355,14 +355,14 @@ const result = await guard(
  fixStructure({
  modifyInputForRetry: async ({ input, error }) => [
  ...input,
- OpenAIChatMessage.assistant(null, {
+ openai.ChatMessage.assistant(null, {
  functionCall: {
  name: "sentiment",
  arguments: JSON.stringify(error.valueText),
  },
  }),
- OpenAIChatMessage.user(error.message),
- OpenAIChatMessage.user("Please fix the error and try again."),
+ openai.ChatMessage.user(error.message),
+ openai.ChatMessage.user("Please fix the error and try again."),
  ],
  })
  );
@@ -418,7 +418,7 @@ With `generateToolCall`, you can generate a tool call for a specific tool with a
  const { id, name, args } = await generateToolCall(
  openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
  calculator,
- [OpenAIChatMessage.user("What's fourteen times twelve?")]
+ [openai.ChatMessage.user("What's fourteen times twelve?")]
  );
  ```

@@ -430,7 +430,7 @@ With `generateToolCallsOrText`, you can ask a language model to generate several
  const { text, toolCalls } = await generateToolCallsOrText(
  openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
  [toolA, toolB, toolC],
- [OpenAIChatMessage.user(query)]
+ [openai.ChatMessage.user(query)]
  );
  ```

@@ -454,7 +454,7 @@ With `useTool`, you can use a tool with a language model that supports tools cal
  const { tool, toolCall, args, ok, result } = await useTool(
  openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
  calculator,
- [OpenAIChatMessage.user("What's fourteen times twelve?")]
+ [openai.ChatMessage.user("What's fourteen times twelve?")]
  );

  console.log(`Tool call:`, toolCall);
@@ -472,7 +472,7 @@ With `useToolsOrGenerateText`, you can ask a language model to generate several
  const { text, toolResults } = await useToolsOrGenerateText(
  openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }),
  [calculator /* ... */],
- [OpenAIChatMessage.user("What's fourteen times twelve?")]
+ [openai.ChatMessage.user("What's fourteen times twelve?")]
  );
  ```

@@ -767,12 +767,6 @@ Examples for almost all of the individual functions and objects. Highly recommen

  StoryTeller is an exploratory web application that creates short audio stories for pre-school kids.

- ### [Chatbot (Terminal)](https://github.com/lgrammel/modelfusion/tree/main/examples/chatbot-terminal)
-
- > _Terminal app_, _chat_, _llama.cpp_
-
- A chat with an AI assistant, implemented as a terminal app.
-
  ### [Chatbot (Next.JS)](https://github.com/lgrammel/modelfusion/tree/main/examples/chatbot-next-js)

  > _Next.js app_, _OpenAI GPT-3.5-turbo_, _streaming_, _abort handling_
package/guard/fixStructure.cjs CHANGED
@@ -45,14 +45,14 @@ const StructureValidationError_js_1 = require("../model-function/generate-struct
  * fixStructure({
  * modifyInputForRetry: async ({ input, error }) => [
  * ...input,
- * OpenAIChatMessage.assistant(null, {
+ * openai.ChatMessage.assistant(null, {
  * functionCall: {
  * name: "sentiment",
  * arguments: JSON.stringify(error.valueText),
  * },
  * }),
- * OpenAIChatMessage.user(error.message),
- * OpenAIChatMessage.user("Please fix the error and try again."),
+ * openai.ChatMessage.user(error.message),
+ * openai.ChatMessage.user("Please fix the error and try again."),
  * ],
  * })
  * );
package/guard/fixStructure.d.ts CHANGED
@@ -43,14 +43,14 @@ import { Guard } from "./guard.js";
  * fixStructure({
  * modifyInputForRetry: async ({ input, error }) => [
  * ...input,
- * OpenAIChatMessage.assistant(null, {
+ * openai.ChatMessage.assistant(null, {
  * functionCall: {
  * name: "sentiment",
  * arguments: JSON.stringify(error.valueText),
  * },
  * }),
- * OpenAIChatMessage.user(error.message),
- * OpenAIChatMessage.user("Please fix the error and try again."),
+ * openai.ChatMessage.user(error.message),
+ * openai.ChatMessage.user("Please fix the error and try again."),
  * ],
  * })
  * );
package/guard/fixStructure.js CHANGED
@@ -42,14 +42,14 @@ import { StructureValidationError } from "../model-function/generate-structure/S
  * fixStructure({
  * modifyInputForRetry: async ({ input, error }) => [
  * ...input,
- * OpenAIChatMessage.assistant(null, {
+ * openai.ChatMessage.assistant(null, {
  * functionCall: {
  * name: "sentiment",
  * arguments: JSON.stringify(error.valueText),
  * },
  * }),
- * OpenAIChatMessage.user(error.message),
- * OpenAIChatMessage.user("Please fix the error and try again."),
+ * openai.ChatMessage.user(error.message),
+ * openai.ChatMessage.user("Please fix the error and try again."),
  * ],
  * })
  * );
package/model-function/generate-structure/generateStructure.d.ts CHANGED
@@ -17,11 +17,11 @@ import { StructureGenerationModel, StructureGenerationModelSettings } from "./St
  * .describe("Sentiment."),
  * })),
  * [
- * OpenAIChatMessage.system(
+ * openai.ChatMessage.system(
  * "You are a sentiment evaluator. " +
  * "Analyze the sentiment of the following product review:"
  * ),
- * OpenAIChatMessage.user(
+ * openai.ChatMessage.user(
  * "After I opened the package, I was met by a very unpleasant smell " +
  * "that did not disappear even after washing. Never again!"
  * ),
package/model-function/generate-structure/streamStructure.d.ts CHANGED
@@ -38,7 +38,7 @@ export type StructureStreamPart<STRUCTURE> = {
  * })
  * ),
  * [
- * OpenAIChatMessage.user(
+ * openai.ChatMessage.user(
  * "Generate 3 character descriptions for a fantasy role playing game."
  * ),
  * ]
package/model-provider/mistral/{MistralTextGenerationModel.cjs → MistralChatModel.cjs} RENAMED
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.MistralTextGenerationResponseFormat = exports.MistralTextGenerationModel = void 0;
+ exports.MistralChatResponseFormat = exports.MistralChatModel = void 0;
  const zod_1 = require("zod");
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
@@ -13,7 +13,7 @@ const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSour
  const MistralApiConfiguration_js_1 = require("./MistralApiConfiguration.cjs");
  const MistralError_js_1 = require("./MistralError.cjs");
  const MistralPromptTemplate_js_1 = require("./MistralPromptTemplate.cjs");
- class MistralTextGenerationModel extends AbstractModel_js_1.AbstractModel {
+ class MistralChatModel extends AbstractModel_js_1.AbstractModel {
  constructor(settings) {
  super({ settings });
  Object.defineProperty(this, "provider", {
@@ -85,7 +85,7 @@ class MistralTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  async doGenerateTexts(prompt, options) {
  const response = await this.callAPI(prompt, {
  ...options,
- responseFormat: exports.MistralTextGenerationResponseFormat.json,
+ responseFormat: exports.MistralChatResponseFormat.json,
  });
  return {
  response,
@@ -95,7 +95,7 @@ class MistralTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  doStreamText(prompt, options) {
  return this.callAPI(prompt, {
  ...options,
- responseFormat: exports.MistralTextGenerationResponseFormat.textDeltaIterable,
+ responseFormat: exports.MistralChatResponseFormat.textDeltaIterable,
  });
  }
  /**
@@ -123,11 +123,11 @@ class MistralTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  });
  }
  withSettings(additionalSettings) {
- return new MistralTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
+ return new MistralChatModel(Object.assign({}, this.settings, additionalSettings));
  }
  }
- exports.MistralTextGenerationModel = MistralTextGenerationModel;
- const mistralTextGenerationResponseSchema = zod_1.z.object({
+ exports.MistralChatModel = MistralChatModel;
+ const mistralChatResponseSchema = zod_1.z.object({
  id: zod_1.z.string(),
  object: zod_1.z.string(),
  created: zod_1.z.number(),
@@ -146,23 +146,23 @@ const mistralTextGenerationResponseSchema = zod_1.z.object({
  total_tokens: zod_1.z.number(),
  }),
  });
- exports.MistralTextGenerationResponseFormat = {
+ exports.MistralChatResponseFormat = {
  /**
  * Returns the response as a JSON object.
  */
  json: {
  stream: false,
- handler: (0, postToApi_js_1.createJsonResponseHandler)(mistralTextGenerationResponseSchema),
+ handler: (0, postToApi_js_1.createJsonResponseHandler)(mistralChatResponseSchema),
  },
  /**
  * Returns an async iterable over the text deltas (only the text delta of the first choice).
  */
  textDeltaIterable: {
  stream: true,
- handler: async ({ response }) => createMistralTextGenerationDeltaIterableQueue(response.body, (delta) => delta[0]?.delta.content ?? ""),
+ handler: async ({ response }) => createMistralChatDeltaIterableQueue(response.body, (delta) => delta[0]?.delta.content ?? ""),
  },
  };
- const mistralTextGenerationChunkSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
+ const mistralChatChunkSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
  id: zod_1.z.string(),
  object: zod_1.z.string().optional(),
  created: zod_1.z.number().optional(),
@@ -179,7 +179,7 @@ const mistralTextGenerationChunkSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.ob
  .optional(),
  })),
  }));
- async function createMistralTextGenerationDeltaIterableQueue(stream, extractDeltaValue) {
+ async function createMistralChatDeltaIterableQueue(stream, extractDeltaValue) {
  const queue = new AsyncQueue_js_1.AsyncQueue();
  const streamDelta = [];
  // process the stream asynchronously (no 'await' on purpose):
@@ -194,7 +194,7 @@ async function createMistralTextGenerationDeltaIterableQueue(stream, extractDelt
  }
  const parseResult = (0, parseJSON_js_1.safeParseJSON)({
  text: data,
- schema: mistralTextGenerationChunkSchema,
+ schema: mistralChatChunkSchema,
  });
  if (!parseResult.success) {
  queue.push({
package/model-provider/mistral/{MistralTextGenerationModel.d.ts → MistralChatModel.d.ts} RENAMED
@@ -7,11 +7,12 @@ import { Delta } from "../../model-function/Delta.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
  import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
- export type MistralTextGenerationPrompt = Array<{
+ export type MistralChatMessage = {
  role: "system" | "user" | "assistant";
  content: string;
- }>;
- export interface MistralTextGenerationModelSettings extends TextGenerationModelSettings {
+ };
+ export type MistralChatPrompt = Array<MistralChatMessage>;
+ export interface MistralChatModelSettings extends TextGenerationModelSettings {
  api?: ApiConfiguration;
  model: "mistral-tiny" | "mistral-small" | "mistral-medium";
  /**
@@ -46,18 +47,18 @@ export interface MistralTextGenerationModelSettings extends TextGenerationModelS
  */
  randomSeed?: number | null;
  }
- export declare class MistralTextGenerationModel extends AbstractModel<MistralTextGenerationModelSettings> implements TextStreamingModel<MistralTextGenerationPrompt, MistralTextGenerationModelSettings> {
- constructor(settings: MistralTextGenerationModelSettings);
+ export declare class MistralChatModel extends AbstractModel<MistralChatModelSettings> implements TextStreamingModel<MistralChatPrompt, MistralChatModelSettings> {
+ constructor(settings: MistralChatModelSettings);
  readonly provider = "mistral";
  get modelName(): "mistral-tiny" | "mistral-small" | "mistral-medium";
  readonly contextWindowSize: undefined;
  readonly tokenizer: undefined;
  readonly countPromptTokens: undefined;
- callAPI<RESULT>(prompt: MistralTextGenerationPrompt, options: {
- responseFormat: MistralTextGenerationResponseFormatType<RESULT>;
+ callAPI<RESULT>(prompt: MistralChatPrompt, options: {
+ responseFormat: MistralChatResponseFormatType<RESULT>;
  } & FunctionOptions): Promise<RESULT>;
- get settingsForEvent(): Partial<MistralTextGenerationModelSettings>;
- doGenerateTexts(prompt: MistralTextGenerationPrompt, options?: FunctionOptions): Promise<{
+ get settingsForEvent(): Partial<MistralChatModelSettings>;
+ doGenerateTexts(prompt: MistralChatPrompt, options?: FunctionOptions): Promise<{
  response: {
  object: string;
  usage: {
@@ -79,23 +80,23 @@ export declare class MistralTextGenerationModel extends AbstractModel<MistralTex
  };
  texts: string[];
  }>;
- doStreamText(prompt: MistralTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+ doStreamText(prompt: MistralChatPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  /**
  * Returns this model with a text prompt template.
  */
- withTextPrompt(): PromptTemplateTextStreamingModel<string, MistralTextGenerationPrompt, MistralTextGenerationModelSettings, this>;
+ withTextPrompt(): PromptTemplateTextStreamingModel<string, MistralChatPrompt, MistralChatModelSettings, this>;
  /**
  * Returns this model with an instruction prompt template.
  */
- withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextInstructionPrompt, MistralTextGenerationPrompt, MistralTextGenerationModelSettings, this>;
+ withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextInstructionPrompt, MistralChatPrompt, MistralChatModelSettings, this>;
  /**
  * Returns this model with a chat prompt template.
  */
- withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextChatPrompt, MistralTextGenerationPrompt, MistralTextGenerationModelSettings, this>;
- withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, MistralTextGenerationPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, MistralTextGenerationPrompt, MistralTextGenerationModelSettings, this>;
- withSettings(additionalSettings: Partial<MistralTextGenerationModelSettings>): this;
+ withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextChatPrompt, MistralChatPrompt, MistralChatModelSettings, this>;
+ withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, MistralChatPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, MistralChatPrompt, MistralChatModelSettings, this>;
+ withSettings(additionalSettings: Partial<MistralChatModelSettings>): this;
  }
- declare const mistralTextGenerationResponseSchema: z.ZodObject<{
+ declare const mistralChatResponseSchema: z.ZodObject<{
  id: z.ZodString;
  object: z.ZodString;
  created: z.ZodNumber;
@@ -178,12 +179,12 @@ declare const mistralTextGenerationResponseSchema: z.ZodObject<{
  index: number;
  }[];
  }>;
- export type MistralTextGenerationResponse = z.infer<typeof mistralTextGenerationResponseSchema>;
- export type MistralTextGenerationResponseFormatType<T> = {
+ export type MistralChatResponse = z.infer<typeof mistralChatResponseSchema>;
+ export type MistralChatResponseFormatType<T> = {
  stream: boolean;
  handler: ResponseHandler<T>;
  };
- export declare const MistralTextGenerationResponseFormat: {
+ export declare const MistralChatResponseFormat: {
  /**
  * Returns the response as a JSON object.
  */
@@ -219,7 +220,7 @@ export declare const MistralTextGenerationResponseFormat: {
  }) => Promise<AsyncIterable<Delta<string>>>;
  };
  };
- export type MistralTextGenerationDelta = Array<{
+ export type MistralChatDelta = Array<{
  role: "assistant" | "user" | undefined;
  content: string;
  isComplete: boolean;
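
The renamed declaration file also factors the prompt type: `MistralChatPrompt` is now an array of named `MistralChatMessage` objects instead of an inline array type. A conforming prompt literal, as a sketch (assuming the type is re-exported from the package root via the provider index changes below):

```ts
import type { MistralChatPrompt } from "modelfusion";

// MistralChatMessage = { role: "system" | "user" | "assistant"; content: string }
const prompt: MistralChatPrompt = [
  { role: "system", content: "You are a concise assistant." },
  { role: "user", content: "Summarize the 0.100.0 changes." },
];
```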
package/model-provider/mistral/{MistralTextGenerationModel.js → MistralChatModel.js} RENAMED
@@ -10,7 +10,7 @@ import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStr
  import { MistralApiConfiguration } from "./MistralApiConfiguration.js";
  import { failedMistralCallResponseHandler } from "./MistralError.js";
  import { chat, instruction, text } from "./MistralPromptTemplate.js";
- export class MistralTextGenerationModel extends AbstractModel {
+ export class MistralChatModel extends AbstractModel {
  constructor(settings) {
  super({ settings });
  Object.defineProperty(this, "provider", {
@@ -82,7 +82,7 @@ export class MistralTextGenerationModel extends AbstractModel {
  async doGenerateTexts(prompt, options) {
  const response = await this.callAPI(prompt, {
  ...options,
- responseFormat: MistralTextGenerationResponseFormat.json,
+ responseFormat: MistralChatResponseFormat.json,
  });
  return {
  response,
@@ -92,7 +92,7 @@ export class MistralTextGenerationModel extends AbstractModel {
  doStreamText(prompt, options) {
  return this.callAPI(prompt, {
  ...options,
- responseFormat: MistralTextGenerationResponseFormat.textDeltaIterable,
+ responseFormat: MistralChatResponseFormat.textDeltaIterable,
  });
  }
  /**
@@ -120,10 +120,10 @@ export class MistralTextGenerationModel extends AbstractModel {
  });
  }
  withSettings(additionalSettings) {
- return new MistralTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
+ return new MistralChatModel(Object.assign({}, this.settings, additionalSettings));
  }
  }
- const mistralTextGenerationResponseSchema = z.object({
+ const mistralChatResponseSchema = z.object({
  id: z.string(),
  object: z.string(),
  created: z.number(),
@@ -142,23 +142,23 @@ const mistralTextGenerationResponseSchema = z.object({
  total_tokens: z.number(),
  }),
  });
- export const MistralTextGenerationResponseFormat = {
+ export const MistralChatResponseFormat = {
  /**
  * Returns the response as a JSON object.
  */
  json: {
  stream: false,
- handler: createJsonResponseHandler(mistralTextGenerationResponseSchema),
+ handler: createJsonResponseHandler(mistralChatResponseSchema),
  },
  /**
  * Returns an async iterable over the text deltas (only the text delta of the first choice).
  */
  textDeltaIterable: {
  stream: true,
- handler: async ({ response }) => createMistralTextGenerationDeltaIterableQueue(response.body, (delta) => delta[0]?.delta.content ?? ""),
+ handler: async ({ response }) => createMistralChatDeltaIterableQueue(response.body, (delta) => delta[0]?.delta.content ?? ""),
  },
  };
- const mistralTextGenerationChunkSchema = new ZodSchema(z.object({
+ const mistralChatChunkSchema = new ZodSchema(z.object({
  id: z.string(),
  object: z.string().optional(),
  created: z.number().optional(),
@@ -175,7 +175,7 @@ const mistralTextGenerationChunkSchema = new ZodSchema(z.object({
  .optional(),
  })),
  }));
- async function createMistralTextGenerationDeltaIterableQueue(stream, extractDeltaValue) {
+ async function createMistralChatDeltaIterableQueue(stream, extractDeltaValue) {
  const queue = new AsyncQueue();
  const streamDelta = [];
  // process the stream asynchronously (no 'await' on purpose):
@@ -190,7 +190,7 @@ async function createMistralTextGenerationDeltaIterableQueue(stream, extractDelt
  }
  const parseResult = safeParseJSON({
  text: data,
- schema: mistralTextGenerationChunkSchema,
+ schema: mistralChatChunkSchema,
  });
  if (!parseResult.success) {
  queue.push({
package/model-provider/mistral/MistralFacade.cjs CHANGED
@@ -1,17 +1,17 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.TextEmbedder = exports.TextGenerator = exports.Api = void 0;
+ exports.TextEmbedder = exports.ChatTextGenerator = exports.Api = void 0;
  const MistralApiConfiguration_js_1 = require("./MistralApiConfiguration.cjs");
  const MistralTextEmbeddingModel_js_1 = require("./MistralTextEmbeddingModel.cjs");
- const MistralTextGenerationModel_js_1 = require("./MistralTextGenerationModel.cjs");
+ const MistralChatModel_js_1 = require("./MistralChatModel.cjs");
  function Api(settings) {
  return new MistralApiConfiguration_js_1.MistralApiConfiguration(settings);
  }
  exports.Api = Api;
- function TextGenerator(settings) {
- return new MistralTextGenerationModel_js_1.MistralTextGenerationModel(settings);
+ function ChatTextGenerator(settings) {
+ return new MistralChatModel_js_1.MistralChatModel(settings);
  }
- exports.TextGenerator = TextGenerator;
+ exports.ChatTextGenerator = ChatTextGenerator;
  function TextEmbedder(settings) {
  return new MistralTextEmbeddingModel_js_1.MistralTextEmbeddingModel(settings);
  }
package/model-provider/mistral/MistralFacade.d.ts CHANGED
@@ -1,6 +1,7 @@
  import { MistralApiConfiguration, MistralApiConfigurationSettings } from "./MistralApiConfiguration.js";
  import { MistralTextEmbeddingModel, MistralTextEmbeddingModelSettings } from "./MistralTextEmbeddingModel.js";
- import { MistralTextGenerationModel, MistralTextGenerationModelSettings } from "./MistralTextGenerationModel.js";
+ import { MistralChatModel, MistralChatModelSettings } from "./MistralChatModel.js";
  export declare function Api(settings: MistralApiConfigurationSettings): MistralApiConfiguration;
- export declare function TextGenerator(settings: MistralTextGenerationModelSettings): MistralTextGenerationModel;
+ export declare function ChatTextGenerator(settings: MistralChatModelSettings): MistralChatModel;
  export declare function TextEmbedder(settings: MistralTextEmbeddingModelSettings): MistralTextEmbeddingModel;
+ export { MistralChatMessage as ChatMessage, MistralChatPrompt as ChatPrompt, } from "./MistralChatModel.js";
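
With the facade now exposing `ChatTextGenerator` (and re-exporting the `ChatMessage`/`ChatPrompt` types), calling a Mistral chat model mirrors the OpenAI examples in the README. A sketch; the model id and prompt are illustrative, and the positional `streamText(model, prompt)` signature follows the README examples above:

```ts
import { streamText, mistral } from "modelfusion";

const textStream = await streamText(
  mistral.ChatTextGenerator({ model: "mistral-tiny" }),
  [{ role: "user", content: "Write a haiku about package diffs." }]
);

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```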
package/model-provider/mistral/MistralFacade.js CHANGED
@@ -1,11 +1,11 @@
  import { MistralApiConfiguration, } from "./MistralApiConfiguration.js";
  import { MistralTextEmbeddingModel, } from "./MistralTextEmbeddingModel.js";
- import { MistralTextGenerationModel, } from "./MistralTextGenerationModel.js";
+ import { MistralChatModel, } from "./MistralChatModel.js";
  export function Api(settings) {
  return new MistralApiConfiguration(settings);
  }
- export function TextGenerator(settings) {
- return new MistralTextGenerationModel(settings);
+ export function ChatTextGenerator(settings) {
+ return new MistralChatModel(settings);
  }
  export function TextEmbedder(settings) {
  return new MistralTextEmbeddingModel(settings);
package/model-provider/mistral/MistralPromptTemplate.d.ts CHANGED
@@ -1,16 +1,16 @@
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
  import { TextChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
  import { TextInstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
- import { MistralTextGenerationPrompt } from "./MistralTextGenerationModel.js";
+ import { MistralChatPrompt } from "./MistralChatModel.js";
  /**
  * Formats a text prompt as a Mistral prompt.
  */
- export declare function text(): TextGenerationPromptTemplate<string, MistralTextGenerationPrompt>;
+ export declare function text(): TextGenerationPromptTemplate<string, MistralChatPrompt>;
  /**
  * Formats an instruction prompt as a Mistral prompt.
  */
- export declare function instruction(): TextGenerationPromptTemplate<TextInstructionPrompt, MistralTextGenerationPrompt>;
+ export declare function instruction(): TextGenerationPromptTemplate<TextInstructionPrompt, MistralChatPrompt>;
  /**
  * Formats a chat prompt as a Mistral prompt.
  */
- export declare function chat(): TextGenerationPromptTemplate<TextChatPrompt, MistralTextGenerationPrompt>;
+ export declare function chat(): TextGenerationPromptTemplate<TextChatPrompt, MistralChatPrompt>;
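
These template declarations back the `withTextPrompt()`, `withInstructionPrompt()`, and `withChatPrompt()` helpers on `MistralChatModel` above: each maps a simpler prompt shape onto a `MistralChatPrompt`. A sketch with the text template (the exact message mapping is an assumption; presumably a plain string becomes a single user message):

```ts
import { generateText, mistral } from "modelfusion";

// withTextPrompt() applies the text() template, so the model accepts a plain string:
const story = await generateText(
  mistral.ChatTextGenerator({ model: "mistral-tiny" }).withTextPrompt(),
  "Write a one-sentence story about a robot learning to cook."
);
```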
package/model-provider/mistral/index.cjs CHANGED
@@ -31,4 +31,4 @@ __exportStar(require("./MistralApiConfiguration.cjs"), exports);
  exports.mistral = __importStar(require("./MistralFacade.cjs"));
  exports.MistralPrompt = __importStar(require("./MistralPromptTemplate.cjs"));
  __exportStar(require("./MistralTextEmbeddingModel.cjs"), exports);
- __exportStar(require("./MistralTextGenerationModel.cjs"), exports);
+ __exportStar(require("./MistralChatModel.cjs"), exports);
package/model-provider/mistral/index.d.ts CHANGED
@@ -3,4 +3,4 @@ export { MistralErrorData } from "./MistralError.js";
  export * as mistral from "./MistralFacade.js";
  export * as MistralPrompt from "./MistralPromptTemplate.js";
  export * from "./MistralTextEmbeddingModel.js";
- export * from "./MistralTextGenerationModel.js";
+ export * from "./MistralChatModel.js";
package/model-provider/mistral/index.js CHANGED
@@ -2,4 +2,4 @@ export * from "./MistralApiConfiguration.js";
  export * as mistral from "./MistralFacade.js";
  export * as MistralPrompt from "./MistralPromptTemplate.js";
  export * from "./MistralTextEmbeddingModel.js";
- export * from "./MistralTextGenerationModel.js";
+ export * from "./MistralChatModel.js";
package/model-provider/ollama/OllamaApiConfiguration.d.ts CHANGED
@@ -1,10 +1,11 @@
  import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
  import { RetryFunction } from "../../core/api/RetryFunction.js";
  import { ThrottleFunction } from "../../core/api/ThrottleFunction.js";
+ export type OllamaApiConfigurationSettings = {
+ baseUrl?: string;
+ retry?: RetryFunction;
+ throttle?: ThrottleFunction;
+ };
  export declare class OllamaApiConfiguration extends BaseUrlApiConfiguration {
- constructor({ baseUrl, retry, throttle, }?: {
- baseUrl?: string;
- retry?: RetryFunction;
- throttle?: ThrottleFunction;
- });
+ constructor({ baseUrl, retry, throttle, }?: OllamaApiConfigurationSettings);
  }
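
Extracting the constructor's inline options object into an exported `OllamaApiConfigurationSettings` type lets user code name the settings. A sketch, assuming the class and type are re-exported from the package root like the other provider configurations, and using Ollama's default local endpoint:

```ts
import { OllamaApiConfiguration, type OllamaApiConfigurationSettings } from "modelfusion";

const settings: OllamaApiConfigurationSettings = {
  baseUrl: "http://127.0.0.1:11434", // Ollama's default local endpoint
};

const api = new OllamaApiConfiguration(settings);
```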