modelfusion 0.45.3 → 0.46.0

This diff compares the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
package/README.md CHANGED
@@ -35,7 +35,7 @@ Or use a template: [ModelFusion terminal app starter](https://github.com/lgramme
 
 You can provide API keys for the different [integrations](https://modelfusion.dev/integration/model-provider/) using environment variables (e.g., `OPENAI_API_KEY`) or pass them into the model constructors as options.
 
- ### [Generate Text](https://modelfusion.dev/guide/function/generate-text)
+ ### [Generate and Stream Text](https://modelfusion.dev/guide/function/generate-text)
 
 Generate text using a language model and a prompt.
 You can stream the text if it is supported by the model.
@@ -71,70 +71,11 @@ for await (const textFragment of textStream) {
 
 Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
 
- #### Prompt Format
+ ### [Generate and Stream Structure](https://modelfusion.dev/guide/function/generate-structure#generatestructure)
 
- [Prompt format](https://modelfusion.dev/guide/function/generate-text/prompt-format) lets you use higher level prompt structures (such as instruction or chat prompts) for different models.
+ Generate typed objects using a language model and a schema.
 
- ```ts
- const text = await generateText(
-   new LlamaCppTextGenerationModel({
-     contextWindowSize: 4096, // Llama 2 context window size
-     maxCompletionTokens: 1000,
-   }).withPromptFormat(mapInstructionPromptToLlama2Format()),
-   {
-     system: "You are a story writer.",
-     instruction: "Write a short story about a robot learning to love.",
-   }
- );
- ```
-
- ```ts
- const textStream = await streamText(
-   new OpenAIChatModel({
-     model: "gpt-3.5-turbo",
-   }).withPromptFormat(mapChatPromptToOpenAIChatFormat()),
-   [
-     { system: "You are a celebrated poet." },
-     { user: "Write a short story about a robot learning to love." },
-     { ai: "Once upon a time, there was a robot who learned to love." },
-     { user: "That's a great start!" },
-   ]
- );
- ```
-
- | Prompt Format | Instruction Prompt | Chat Prompt |
- | ------------- | ------------------ | ----------- |
- | OpenAI Chat   | ✅                 | ✅          |
- | Anthropic     | ✅                 | ✅          |
- | Llama 2       | ✅                 | ✅          |
- | Alpaca        | ✅                 | ❌          |
- | Vicuna        | ❌                 | ✅          |
- | Generic Text  | ✅                 | ✅          |
-
- #### Metadata and original responses
-
- ModelFusion model functions return rich results that include the original response and metadata when you call `.asFullResponse()` before resolving the promise.
-
- ```ts
- // access the full response (needs to be typed) and the metadata:
- const { value, response, metadata } = await generateText(
-   new OpenAITextGenerationModel({
-     model: "gpt-3.5-turbo-instruct",
-     maxCompletionTokens: 1000,
-     n: 2, // generate 2 completions
-   }),
-   "Write a short story about a robot learning to love:\n\n"
- ).asFullResponse();
-
- console.log(metadata);
-
- // cast to the response type:
- for (const choice of (response as OpenAITextGenerationResponse).choices) {
-   console.log(choice.text);
- }
- ```
-
- ### [Generate Structure](https://modelfusion.dev/guide/function/generate-structure#generatestructure)
+ #### generateStructure
 
 Generate a structure that matches a schema.
 
@@ -169,7 +110,7 @@ const sentiment = await generateStructure(
 
 Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai)
 
- ### [Stream Structure](https://modelfusion.dev/guide/function/generate-structure#streamstructure)
+ #### streamStructure
 
 Stream a structure that matches a schema. Partial structures before the final part are untyped JSON.
 
@@ -490,6 +431,71 @@ const result = await guard(
 );
 ```
 
+ ### [Prompt Formats](https://modelfusion.dev/guide/function/generate-text/prompt-format)
+
+ Prompt formats let you use higher level prompt structures (such as instruction or chat prompts) for different models.
+
+ ```ts
+ const text = await generateText(
+   new LlamaCppTextGenerationModel({
+     contextWindowSize: 4096, // Llama 2 context window size
+     maxCompletionTokens: 1000,
+   }).withPromptFormat(mapInstructionPromptToLlama2Format()),
+   {
+     system: "You are a story writer.",
+     instruction: "Write a short story about a robot learning to love.",
+   }
+ );
+ ```
+
+ They can also be accessed through the shorthand methods `.withChatPrompt()` and `.withInstructionPrompt()` on many models:
+
+ ```ts
+ const textStream = await streamText(
+   new OpenAIChatModel({
+     model: "gpt-3.5-turbo",
+   }).withChatPrompt(),
+   [
+     { system: "You are a celebrated poet." },
+     { user: "Write a short story about a robot learning to love." },
+     { ai: "Once upon a time, there was a robot who learned to love." },
+     { user: "That's a great start!" },
+   ]
+ );
+ ```
+
+ | Prompt Format | Instruction Prompt | Chat Prompt |
+ | ------------- | ------------------ | ----------- |
+ | OpenAI Chat   | ✅                 | ✅          |
+ | Anthropic     | ✅                 | ✅          |
+ | Llama 2       | ✅                 | ✅          |
+ | Alpaca        | ✅                 | ❌          |
+ | Vicuna        | ❌                 | ✅          |
+ | Generic Text  | ✅                 | ✅          |
+
+ ### Metadata and original responses
+
+ ModelFusion model functions return rich results that include the original response and metadata when you call `.asFullResponse()` before resolving the promise.
+
+ ```ts
+ // access the full response (needs to be typed) and the metadata:
+ const { value, response, metadata } = await generateText(
+   new OpenAITextGenerationModel({
+     model: "gpt-3.5-turbo-instruct",
+     maxCompletionTokens: 1000,
+     n: 2, // generate 2 completions
+   }),
+   "Write a short story about a robot learning to love:\n\n"
+ ).asFullResponse();
+
+ console.log(metadata);
+
+ // cast to the response type:
+ for (const choice of (response as OpenAITextGenerationResponse).choices) {
+   console.log(choice.text);
+ }
+ ```
+
 ### Observability
 
 Integrations: [Helicone](https://modelfusion.dev/integration/observability/helicone)
package/model-function/generate-text/TextGenerationModel.d.ts CHANGED
@@ -20,6 +20,13 @@ export interface TextGenerationModelSettings extends ModelSettings {
    */
   trimWhitespace?: boolean;
 }
+ export interface HasContextWindowSize {
+   contextWindowSize: number;
+ }
+ export interface HasTokenizer<PROMPT> {
+   tokenizer: BasicTokenizer | FullTokenizer;
+   countPromptTokens(prompt: PROMPT): PromiseLike<number>;
+ }
 export interface TextGenerationModel<PROMPT, SETTINGS extends TextGenerationModelSettings = TextGenerationModelSettings> extends Model<SETTINGS> {
   readonly contextWindowSize: number | undefined;
   readonly tokenizer: BasicTokenizer | FullTokenizer | undefined;
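
The two new `HasContextWindowSize` and `HasTokenizer` interfaces factor out capabilities that `trimChatPrompt` previously declared inline (see the trimChatPrompt.d.ts change at the end of this diff). A minimal sketch of using them as type constraints; the `remainingTokens` helper is hypothetical and assumes the interfaces are re-exported from the package root:

```ts
import {
  HasContextWindowSize,
  HasTokenizer,
  TextGenerationModel,
  TextGenerationModelSettings,
} from "modelfusion";

// Hypothetical helper: accepts only models that expose a tokenizer,
// countPromptTokens, and a known context window size.
async function remainingTokens<PROMPT>(
  model: TextGenerationModel<PROMPT, TextGenerationModelSettings> &
    HasTokenizer<PROMPT> &
    HasContextWindowSize,
  prompt: PROMPT
): Promise<number> {
  const promptTokens = await model.countPromptTokens(prompt);
  return model.contextWindowSize - promptTokens;
}
```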
package/model-provider/anthropic/AnthropicTextGenerationModel.cjs CHANGED
@@ -14,6 +14,7 @@ const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const PromptFormatTextStreamingModel_js_1 = require("../../prompt/PromptFormatTextStreamingModel.cjs");
 const AnthropicApiConfiguration_js_1 = require("./AnthropicApiConfiguration.cjs");
 const AnthropicError_js_1 = require("./AnthropicError.cjs");
+ const AnthropicPromptFormat_js_1 = require("./AnthropicPromptFormat.cjs");
 exports.ANTHROPIC_TEXT_GENERATION_MODELS = {
   "claude-instant-1": {
     contextWindowSize: 100000,
@@ -107,6 +108,18 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
       responseFormat: exports.AnthropicTextGenerationResponseFormat.deltaIterable,
     });
   }
+   /**
+    * Returns this model with an instruction prompt format.
+    */
+   withInstructionPrompt() {
+     return this.withPromptFormat((0, AnthropicPromptFormat_js_1.mapInstructionPromptToAnthropicFormat)());
+   }
+   /**
+    * Returns this model with a chat prompt format.
+    */
+   withChatPrompt() {
+     return this.withPromptFormat((0, AnthropicPromptFormat_js_1.mapChatPromptToAnthropicFormat)());
+   }
   withPromptFormat(promptFormat) {
     return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
       model: this.withSettings({
package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts CHANGED
@@ -55,6 +55,14 @@ export declare class AnthropicTextGenerationModel extends AbstractModel<Anthropi
     text: string;
   }>;
   doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+   /**
+    * Returns this model with an instruction prompt format.
+    */
+   withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").InstructionPrompt, string, AnthropicTextGenerationModelSettings, this>;
+   /**
+    * Returns this model with a chat prompt format.
+    */
+   withChatPrompt(): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, AnthropicTextGenerationModelSettings, this>;
   withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, AnthropicTextGenerationModelSettings, this>;
   withSettings(additionalSettings: Partial<AnthropicTextGenerationModelSettings>): this;
 }
package/model-provider/anthropic/AnthropicTextGenerationModel.js CHANGED
@@ -8,6 +8,7 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptFormatTextStreamingModel } from "../../prompt/PromptFormatTextStreamingModel.js";
 import { AnthropicApiConfiguration } from "./AnthropicApiConfiguration.js";
 import { failedAnthropicCallResponseHandler } from "./AnthropicError.js";
+ import { mapChatPromptToAnthropicFormat, mapInstructionPromptToAnthropicFormat, } from "./AnthropicPromptFormat.js";
 export const ANTHROPIC_TEXT_GENERATION_MODELS = {
   "claude-instant-1": {
     contextWindowSize: 100000,
@@ -101,6 +102,18 @@ export class AnthropicTextGenerationModel extends AbstractModel {
       responseFormat: AnthropicTextGenerationResponseFormat.deltaIterable,
     });
   }
+   /**
+    * Returns this model with an instruction prompt format.
+    */
+   withInstructionPrompt() {
+     return this.withPromptFormat(mapInstructionPromptToAnthropicFormat());
+   }
+   /**
+    * Returns this model with a chat prompt format.
+    */
+   withChatPrompt() {
+     return this.withPromptFormat(mapChatPromptToAnthropicFormat());
+   }
   withPromptFormat(promptFormat) {
     return new PromptFormatTextStreamingModel({
       model: this.withSettings({
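
The new shorthands make the README examples above work for Anthropic models as well. A minimal usage sketch, assuming `ANTHROPIC_API_KEY` is set in the environment:

```ts
import { AnthropicTextGenerationModel, streamText } from "modelfusion";

// .withInstructionPrompt() is shorthand for
// .withPromptFormat(mapInstructionPromptToAnthropicFormat())
const textStream = await streamText(
  new AnthropicTextGenerationModel({
    model: "claude-instant-1",
  }).withInstructionPrompt(),
  {
    system: "You are a story writer.",
    instruction: "Write a short story about a robot learning to love.",
  }
);

for await (const textFragment of textStream) {
  process.stdout.write(textFragment);
}
```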
package/model-provider/cohere/CohereTextGenerationModel.cjs CHANGED
@@ -12,6 +12,7 @@ const AsyncQueue_js_1 = require("../../event-source/AsyncQueue.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
 const PromptFormatTextStreamingModel_js_1 = require("../../prompt/PromptFormatTextStreamingModel.cjs");
+ const TextPromptFormat_js_1 = require("../../prompt/TextPromptFormat.cjs");
 const CohereApiConfiguration_js_1 = require("./CohereApiConfiguration.cjs");
 const CohereError_js_1 = require("./CohereError.cjs");
 const CohereTokenizer_js_1 = require("./CohereTokenizer.cjs");
@@ -134,6 +135,18 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
   extractTextDelta(fullDelta) {
     return fullDelta.delta;
   }
+   /**
+    * Returns this model with an instruction prompt format.
+    */
+   withInstructionPrompt() {
+     return this.withPromptFormat((0, TextPromptFormat_js_1.mapInstructionPromptToTextFormat)());
+   }
+   /**
+    * Returns this model with a chat prompt format.
+    */
+   withChatPrompt(options) {
+     return this.withPromptFormat((0, TextPromptFormat_js_1.mapChatPromptToTextFormat)(options));
+   }
   withPromptFormat(promptFormat) {
     return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
       model: this.withSettings({
package/model-provider/cohere/CohereTextGenerationModel.d.ts CHANGED
@@ -84,6 +84,17 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
   }>;
   doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
   extractTextDelta(fullDelta: CohereTextGenerationDelta): string | undefined;
+   /**
+    * Returns this model with an instruction prompt format.
+    */
+   withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").InstructionPrompt, string, CohereTextGenerationModelSettings, this>;
+   /**
+    * Returns this model with a chat prompt format.
+    */
+   withChatPrompt(options?: {
+     user?: string;
+     ai?: string;
+   }): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, CohereTextGenerationModelSettings, this>;
   withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, CohereTextGenerationModelSettings, this>;
   withSettings(additionalSettings: Partial<CohereTextGenerationModelSettings>): this;
 }
package/model-provider/cohere/CohereTextGenerationModel.js CHANGED
@@ -6,6 +6,7 @@ import { AsyncQueue } from "../../event-source/AsyncQueue.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { PromptFormatTextStreamingModel } from "../../prompt/PromptFormatTextStreamingModel.js";
+ import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../prompt/TextPromptFormat.js";
 import { CohereApiConfiguration } from "./CohereApiConfiguration.js";
 import { failedCohereCallResponseHandler } from "./CohereError.js";
 import { CohereTokenizer } from "./CohereTokenizer.js";
@@ -128,6 +129,18 @@ export class CohereTextGenerationModel extends AbstractModel {
   extractTextDelta(fullDelta) {
     return fullDelta.delta;
   }
+   /**
+    * Returns this model with an instruction prompt format.
+    */
+   withInstructionPrompt() {
+     return this.withPromptFormat(mapInstructionPromptToTextFormat());
+   }
+   /**
+    * Returns this model with a chat prompt format.
+    */
+   withChatPrompt(options) {
+     return this.withPromptFormat(mapChatPromptToTextFormat(options));
+   }
   withPromptFormat(promptFormat) {
     return new PromptFormatTextStreamingModel({
       model: this.withSettings({
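
Note that the Cohere variant of `.withChatPrompt()` takes optional `user`/`ai` labels, because the chat prompt is flattened into plain text for this model. A minimal sketch; the labels shown are illustrative:

```ts
import { CohereTextGenerationModel, streamText } from "modelfusion";

// the labels are optional and default to "user" and "ai"
// (see the mapChatPromptToTextFormat change below)
const textStream = await streamText(
  new CohereTextGenerationModel({
    model: "command",
    maxCompletionTokens: 500,
  }).withChatPrompt({ user: "Customer", ai: "Agent" }),
  [
    { system: "You are a friendly support agent." },
    { user: "My order has not arrived yet." },
  ]
);
```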
package/model-provider/openai/OpenAITextGenerationModel.cjs CHANGED
@@ -13,6 +13,7 @@ const parseEventSourceStream_js_1 = require("../../event-source/parseEventSource
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
 const PromptFormatTextStreamingModel_js_1 = require("../../prompt/PromptFormatTextStreamingModel.cjs");
+ const TextPromptFormat_js_1 = require("../../prompt/TextPromptFormat.cjs");
 const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
 const OpenAIError_js_1 = require("./OpenAIError.cjs");
 const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
@@ -238,6 +239,18 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
       responseFormat: exports.OpenAITextResponseFormat.deltaIterable,
     });
   }
+   /**
+    * Returns this model with an instruction prompt format.
+    */
+   withInstructionPrompt() {
+     return this.withPromptFormat((0, TextPromptFormat_js_1.mapInstructionPromptToTextFormat)());
+   }
+   /**
+    * Returns this model with a chat prompt format.
+    */
+   withChatPrompt(options) {
+     return this.withPromptFormat((0, TextPromptFormat_js_1.mapChatPromptToTextFormat)(options));
+   }
   withPromptFormat(promptFormat) {
     return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
       model: this.withSettings({
package/model-provider/openai/OpenAITextGenerationModel.d.ts CHANGED
@@ -171,6 +171,17 @@ export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextG
     };
   }>;
   doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+   /**
+    * Returns this model with an instruction prompt format.
+    */
+   withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").InstructionPrompt, string, OpenAITextGenerationModelSettings, this>;
+   /**
+    * Returns this model with a chat prompt format.
+    */
+   withChatPrompt(options?: {
+     user?: string;
+     ai?: string;
+   }): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, OpenAITextGenerationModelSettings, this>;
   withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, OpenAITextGenerationModelSettings, this>;
   withSettings(additionalSettings: Partial<OpenAITextGenerationModelSettings>): this;
 }
package/model-provider/openai/OpenAITextGenerationModel.js CHANGED
@@ -7,6 +7,7 @@ import { parseEventSourceStream } from "../../event-source/parseEventSourceStrea
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { PromptFormatTextStreamingModel } from "../../prompt/PromptFormatTextStreamingModel.js";
+ import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../prompt/TextPromptFormat.js";
 import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
 import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
 import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
@@ -229,6 +230,18 @@ export class OpenAITextGenerationModel extends AbstractModel {
       responseFormat: OpenAITextResponseFormat.deltaIterable,
     });
   }
+   /**
+    * Returns this model with an instruction prompt format.
+    */
+   withInstructionPrompt() {
+     return this.withPromptFormat(mapInstructionPromptToTextFormat());
+   }
+   /**
+    * Returns this model with a chat prompt format.
+    */
+   withChatPrompt(options) {
+     return this.withPromptFormat(mapChatPromptToTextFormat(options));
+   }
   withPromptFormat(promptFormat) {
     return new PromptFormatTextStreamingModel({
       model: this.withSettings({
package/model-provider/openai/chat/OpenAIChatModel.cjs CHANGED
@@ -15,6 +15,7 @@ const PromptFormatTextStreamingModel_js_1 = require("../../../prompt/PromptForma
 const OpenAIApiConfiguration_js_1 = require("../OpenAIApiConfiguration.cjs");
 const OpenAIError_js_1 = require("../OpenAIError.cjs");
 const TikTokenTokenizer_js_1 = require("../TikTokenTokenizer.cjs");
+ const OpenAIChatPromptFormat_js_1 = require("./OpenAIChatPromptFormat.cjs");
 const OpenAIChatStreamIterable_js_1 = require("./OpenAIChatStreamIterable.cjs");
 const countOpenAIChatMessageTokens_js_1 = require("./countOpenAIChatMessageTokens.cjs");
 /*
@@ -341,6 +342,18 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
       totalTokens: response.usage.total_tokens,
     };
   }
+   /**
+    * Returns this model with an instruction prompt format.
+    */
+   withInstructionPrompt() {
+     return this.withPromptFormat((0, OpenAIChatPromptFormat_js_1.mapInstructionPromptToOpenAIChatFormat)());
+   }
+   /**
+    * Returns this model with a chat prompt format.
+    */
+   withChatPrompt() {
+     return this.withPromptFormat((0, OpenAIChatPromptFormat_js_1.mapChatPromptToOpenAIChatFormat)());
+   }
   withPromptFormat(promptFormat) {
     return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
       model: this.withSettings({
package/model-provider/openai/chat/OpenAIChatModel.d.ts CHANGED
@@ -307,6 +307,14 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
     completionTokens: number;
     totalTokens: number;
   };
+   /**
+    * Returns this model with an instruction prompt format.
+    */
+   withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").InstructionPrompt, OpenAIChatMessage[], OpenAIChatSettings, this>;
+   /**
+    * Returns this model with a chat prompt format.
+    */
+   withChatPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").ChatPrompt, OpenAIChatMessage[], OpenAIChatSettings, this>;
   withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, OpenAIChatMessage[]>): PromptFormatTextStreamingModel<INPUT_PROMPT, OpenAIChatMessage[], OpenAIChatSettings, this>;
   withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
 }
package/model-provider/openai/chat/OpenAIChatModel.js CHANGED
@@ -9,6 +9,7 @@ import { PromptFormatTextStreamingModel } from "../../../prompt/PromptFormatText
 import { OpenAIApiConfiguration } from "../OpenAIApiConfiguration.js";
 import { failedOpenAICallResponseHandler } from "../OpenAIError.js";
 import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
+ import { mapChatPromptToOpenAIChatFormat, mapInstructionPromptToOpenAIChatFormat, } from "./OpenAIChatPromptFormat.js";
 import { createOpenAIChatDeltaIterableQueue } from "./OpenAIChatStreamIterable.js";
 import { countOpenAIChatPromptTokens } from "./countOpenAIChatMessageTokens.js";
 /*
@@ -332,6 +333,18 @@ export class OpenAIChatModel extends AbstractModel {
       totalTokens: response.usage.total_tokens,
     };
   }
+   /**
+    * Returns this model with an instruction prompt format.
+    */
+   withInstructionPrompt() {
+     return this.withPromptFormat(mapInstructionPromptToOpenAIChatFormat());
+   }
+   /**
+    * Returns this model with a chat prompt format.
+    */
+   withChatPrompt() {
+     return this.withPromptFormat(mapChatPromptToOpenAIChatFormat());
+   }
   withPromptFormat(promptFormat) {
     return new PromptFormatTextStreamingModel({
       model: this.withSettings({
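
For OpenAI chat models, `.withInstructionPrompt()` maps the instruction prompt onto `OpenAIChatMessage[]` via `mapInstructionPromptToOpenAIChatFormat()`. A minimal sketch:

```ts
import { OpenAIChatModel, generateText } from "modelfusion";

const text = await generateText(
  new OpenAIChatModel({ model: "gpt-3.5-turbo" }).withInstructionPrompt(),
  {
    system: "You are a concise technical writer.",
    instruction: "Explain what a tokenizer does in one paragraph.",
  }
);
```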
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
-   "version": "0.45.3",
+   "version": "0.46.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [
package/prompt/TextPromptFormat.cjs CHANGED
@@ -26,7 +26,7 @@ exports.mapInstructionPromptToTextFormat = mapInstructionPromptToTextFormat;
  * @param user The label of the user in the chat.
  * @param ai The name of the AI in the chat.
  */
- const mapChatPromptToTextFormat = ({ user, ai }) => ({
+ const mapChatPromptToTextFormat = ({ user = "user", ai = "ai", } = {}) => ({
   format: (chatPrompt) => {
     (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
     let text = "";
package/prompt/TextPromptFormat.d.ts CHANGED
@@ -11,7 +11,7 @@ export declare const mapInstructionPromptToTextFormat: () => PromptFormat<Instru
  * @param user The label of the user in the chat.
  * @param ai The name of the AI in the chat.
  */
- export declare const mapChatPromptToTextFormat: ({ user, ai, }: {
-   user: string;
-   ai: string;
+ export declare const mapChatPromptToTextFormat: (options?: {
+   user?: string;
+   ai?: string;
 }) => PromptFormat<ChatPrompt, string>;
package/prompt/TextPromptFormat.js CHANGED
@@ -22,7 +22,7 @@ export const mapInstructionPromptToTextFormat = () => ({
  * @param user The label of the user in the chat.
  * @param ai The name of the AI in the chat.
  */
- export const mapChatPromptToTextFormat = ({ user, ai }) => ({
+ export const mapChatPromptToTextFormat = ({ user = "user", ai = "ai", } = {}) => ({
   format: (chatPrompt) => {
     validateChatPrompt(chatPrompt);
     let text = "";
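
With the default parameters above, `mapChatPromptToTextFormat` can now be called without arguments. A small sketch of the before/after calling conventions:

```ts
import { mapChatPromptToTextFormat } from "modelfusion";

// 0.45.x required both labels:
const explicit = mapChatPromptToTextFormat({ user: "user", ai: "ai" });

// 0.46.0 also allows omitting the argument entirely:
const withDefaults = mapChatPromptToTextFormat(); // user: "user", ai: "ai"

// both produce a PromptFormat<ChatPrompt, string> that flattens
// the chat into labeled lines of plain text:
const prompt = withDefaults.format([
  { system: "You are a helpful assistant." },
  { user: "Hello!" },
]);
```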
package/prompt/chat/trimChatPrompt.d.ts CHANGED
@@ -1,4 +1,4 @@
- import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { HasContextWindowSize, HasTokenizer, TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
 import { ChatPrompt } from "./ChatPrompt.js";
 /**
  * Keeps only the most recent messages in the prompt, while leaving enough space for the completion.
@@ -12,9 +12,6 @@ import { ChatPrompt } from "./ChatPrompt.js";
  */
 export declare function trimChatPrompt({ prompt, model, tokenLimit, }: {
   prompt: ChatPrompt;
-   model: TextGenerationModel<ChatPrompt, TextGenerationModelSettings> & {
-     contextWindowSize: number;
-     countPromptTokens: (prompt: ChatPrompt) => PromiseLike<number>;
-   };
+   model: TextGenerationModel<ChatPrompt, TextGenerationModelSettings> & HasTokenizer<ChatPrompt> & HasContextWindowSize;
   tokenLimit?: number;
 }): Promise<ChatPrompt>;
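
The new signature accepts any chat-prompt model that still exposes a tokenizer and a context window size, such as an `OpenAIChatModel` wrapped with `.withChatPrompt()`. A usage sketch with an illustrative conversation:

```ts
import { OpenAIChatModel, streamText, trimChatPrompt } from "modelfusion";

// the wrapped model still exposes countPromptTokens and
// contextWindowSize, so it satisfies
// HasTokenizer<ChatPrompt> & HasContextWindowSize
const model = new OpenAIChatModel({
  model: "gpt-3.5-turbo",
  maxCompletionTokens: 500,
}).withChatPrompt();

// keeps only the most recent messages that fit into the context
// window, leaving room for the completion
const trimmedPrompt = await trimChatPrompt({
  prompt: [
    { system: "You are a celebrated poet." },
    { user: "Write a short story about a robot learning to love." },
    { ai: "Once upon a time, there was a robot who learned to love." },
    { user: "That's a great start!" },
  ],
  model,
});

const textStream = await streamText(model, trimmedPrompt);
```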