modelfusion 0.87.2 → 0.89.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
  import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
- import { ChatPrompt } from "./ChatPrompt.js";
+ import { TextChatPrompt } from "./ChatPrompt.js";
  import { TextInstructionPrompt } from "./InstructionPrompt.js";
  /**
  * Formats a text prompt using the ChatML format.
@@ -32,4 +32,4 @@ export declare function instruction(): TextGenerationPromptFormat<TextInstructio
  * Paris<|im_end|>
  * ```
  */
- export declare function chat(): TextGenerationPromptFormat<ChatPrompt, string>;
+ export declare function chat(): TextGenerationPromptFormat<TextChatPrompt, string>;
@@ -1,5 +1,6 @@
+ import { MultiModalInput } from "./Content.js";
  /**
- * A chat prompt is a combination of a system message and a list of messages with the following constraints:
+ * A textual chat prompt is a combination of a system message and a list of messages with the following constraints:
  *
  * - A chat prompt can optionally have a system message.
  * - The first message of the chat must be a user message.
@@ -24,21 +25,35 @@
  *
  * @see validateChatPrompt
  */
- export interface ChatPrompt {
+ export interface TextChatPrompt {
  system?: string;
- messages: Array<ChatMessage>;
+ messages: Array<TextChatMessage>;
  }
  /**
- * A message in a chat prompt.
- * @see ChatPrompt
+ * A text message in a chat prompt.
+ * @see TextChatPrompt
  */
- export interface ChatMessage {
- role: "user" | "assistant";
+ export type TextChatMessage = {
+ role: "user";
  content: string;
+ } | {
+ role: "assistant";
+ content: string;
+ };
+ export interface MultiModalChatPrompt {
+ system?: string;
+ messages: Array<MultiModalChatMessage>;
  }
+ export type MultiModalChatMessage = {
+ role: "user";
+ content: MultiModalInput;
+ } | {
+ role: "assistant";
+ content: string;
+ };
  /**
  * Checks if a chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
  *
  * @throws {@link ChatPromptValidationError}
  */
- export declare function validateChatPrompt(chatPrompt: ChatPrompt): void;
+ export declare function validateChatPrompt(chatPrompt: TextChatPrompt | MultiModalChatPrompt): void;
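
For reference, a minimal sketch of how the renamed and newly added prompt types compose. Both types are re-exported from the package index (per the `import("../../index.js")` references later in this diff); the shape of `MultiModalInput` is an assumption inferred from the BakLLaVA formatter further below (an array of text/image parts with a `base64Image` field):

```ts
import { TextChatPrompt, MultiModalChatPrompt } from "modelfusion";

// Text-only chat prompt: user/assistant messages with string content.
const textChat: TextChatPrompt = {
  system: "You are a helpful assistant.",
  messages: [
    { role: "user", content: "What is the capital of France?" },
    { role: "assistant", content: "Paris." },
    { role: "user", content: "And of Germany?" },
  ],
};

// Multi-modal chat prompt: user content is MultiModalInput (assumed shape);
// assistant content stays a plain string.
const multiModalChat: MultiModalChatPrompt = {
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Describe this image: " },
        { type: "image", base64Image: "/9j/4AAQ..." }, // truncated base64 placeholder
      ],
    },
  ],
};
```
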
@@ -1,5 +1,5 @@
  import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
- import { ChatPrompt } from "./ChatPrompt.js";
+ import { TextChatPrompt } from "./ChatPrompt.js";
  import { TextInstructionPrompt } from "./InstructionPrompt.js";
  /**
  * Formats a text prompt as a Llama 2 prompt.
@@ -39,4 +39,4 @@ export declare function instruction(): TextGenerationPromptFormat<TextInstructio
  * ${ user msg 1 } [/INST] ${ model response 1 } </s><s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } </s><s>[INST] ${ user msg 3 } [/INST]
  * ```
  */
- export declare function chat(): TextGenerationPromptFormat<ChatPrompt, string>;
+ export declare function chat(): TextGenerationPromptFormat<TextChatPrompt, string>;
@@ -1,5 +1,5 @@
  import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
- import { ChatPrompt } from "./ChatPrompt.js";
+ import { TextChatPrompt } from "./ChatPrompt.js";
  import { TextInstructionPrompt } from "./InstructionPrompt.js";
  /**
  * Formats a text prompt as a basic text prompt. Does not change the text prompt in any way.
@@ -20,4 +20,4 @@ export declare const chat: (options?: {
  user?: string;
  assistant?: string;
  system?: string;
- }) => TextGenerationPromptFormat<ChatPrompt, string>;
+ }) => TextGenerationPromptFormat<TextChatPrompt, string>;
@@ -1,5 +1,5 @@
  import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
- import { ChatPrompt } from "./ChatPrompt.js";
+ import { TextChatPrompt } from "./ChatPrompt.js";
  /**
  * Formats a chat prompt as a Vicuna prompt.
  *
@@ -13,4 +13,4 @@ import { ChatPrompt } from "./ChatPrompt.js";
  * ASSISTANT:
  * ```
  */
- export declare function chat(): TextGenerationPromptFormat<ChatPrompt, string>;
+ export declare function chat(): TextGenerationPromptFormat<TextChatPrompt, string>;
@@ -1,5 +1,5 @@
  import { HasContextWindowSize, HasTokenizer, TextGenerationModel, TextGenerationModelSettings } from "../TextGenerationModel.js";
- import { ChatPrompt } from "./ChatPrompt.js";
+ import { TextChatPrompt } from "./ChatPrompt.js";
  /**
  * Keeps only the most recent messages in the prompt, while leaving enough space for the completion.
  *
@@ -11,7 +11,7 @@ import { ChatPrompt } from "./ChatPrompt.js";
  * @see https://modelfusion.dev/guide/function/generate-text#limiting-the-chat-length
  */
  export declare function trimChatPrompt({ prompt, model, tokenLimit, }: {
- prompt: ChatPrompt;
- model: TextGenerationModel<ChatPrompt, TextGenerationModelSettings> & HasTokenizer<ChatPrompt> & HasContextWindowSize;
+ prompt: TextChatPrompt;
+ model: TextGenerationModel<TextChatPrompt, TextGenerationModelSettings> & HasTokenizer<TextChatPrompt> & HasContextWindowSize;
  tokenLimit?: number;
- }): Promise<ChatPrompt>;
+ }): Promise<TextChatPrompt>;
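
A usage sketch for the retyped `trimChatPrompt`, reusing `textChat` from the sketch above. The `chatModel` variable is hypothetical: any model that satisfies the tokenizer and context-window constraints in the signature, for example one returned by a provider's `withChatPrompt()`:

```ts
import { trimChatPrompt } from "modelfusion";

// Hypothetical model value; the declare just borrows the exact type from
// the trimChatPrompt signature.
declare const chatModel: Parameters<typeof trimChatPrompt>[0]["model"];

// Keeps only the most recent messages, leaving enough space for the
// completion (per the JSDoc above).
const trimmed = await trimChatPrompt({
  prompt: textChat, // the TextChatPrompt from the earlier sketch
  model: chatModel,
});
```
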
@@ -1,5 +1,5 @@
  import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
- import { ChatPrompt } from "../../model-function/generate-text/prompt-format/ChatPrompt.js";
+ import { TextChatPrompt } from "../../model-function/generate-text/prompt-format/ChatPrompt.js";
  import { TextInstructionPrompt } from "../../model-function/generate-text/prompt-format/InstructionPrompt.js";
  /**
  * Formats a text prompt as an Anthropic prompt.
@@ -14,4 +14,4 @@ export declare function instruction(): TextGenerationPromptFormat<TextInstructio
  *
  * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
  */
- export declare function chat(): TextGenerationPromptFormat<ChatPrompt, string>;
+ export declare function chat(): TextGenerationPromptFormat<TextChatPrompt, string>;
@@ -69,7 +69,7 @@ export declare class AnthropicTextGenerationModel extends AbstractModel<Anthropi
  /**
  * Returns this model with a chat prompt format.
  */
- withChatPrompt(): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, AnthropicTextGenerationModelSettings, this>;
+ withChatPrompt(): PromptFormatTextStreamingModel<import("../../index.js").TextChatPrompt, string, AnthropicTextGenerationModelSettings, this>;
  withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, AnthropicTextGenerationModelSettings, this>;
  withSettings(additionalSettings: Partial<AnthropicTextGenerationModelSettings>): this;
  }
@@ -94,7 +94,7 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
  withChatPrompt(options?: {
  user?: string;
  assistant?: string;
- }): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, CohereTextGenerationModelSettings, this>;
+ }): PromptFormatTextStreamingModel<import("../../index.js").TextChatPrompt, string, CohereTextGenerationModelSettings, this>;
  withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, CohereTextGenerationModelSettings, this>;
  withSettings(additionalSettings: Partial<CohereTextGenerationModelSettings>): this;
  }
@@ -1,6 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.instruction = void 0;
+ exports.chat = exports.instruction = void 0;
+ const ChatPrompt_js_1 = require("../../model-function/generate-text/prompt-format/ChatPrompt.cjs");
  // default Vicuna 1 system message
  const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
  "The assistant gives helpful, detailed, and polite answers to the user's questions.";
@@ -33,10 +34,57 @@ function instruction() {
  }
  text += `${content}\n`;
  }
- text += `ASSISTANT: `;
+ text += `\nASSISTANT: `;
  return { text, images };
  },
  stopSequences: [`\nUSER:`],
  };
  }
  exports.instruction = instruction;
+ function chat() {
+ return {
+ format(prompt) {
+ (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
+ let text = "";
+ text += `${prompt.system ?? DEFAULT_SYSTEM_MESSAGE}\n\n`;
+ // construct text and image mapping:
+ let imageCounter = 1;
+ const images = {};
+ for (const { role, content } of prompt.messages) {
+ switch (role) {
+ case "user": {
+ text += `USER: `;
+ for (const part of content) {
+ switch (part.type) {
+ case "text": {
+ text += part.text;
+ break;
+ }
+ case "image": {
+ text += `[img-${imageCounter}]`;
+ images[imageCounter.toString()] = part.base64Image;
+ imageCounter++;
+ break;
+ }
+ }
+ }
+ break;
+ }
+ case "assistant": {
+ text += `ASSISTANT: ${content}`;
+ break;
+ }
+ default: {
+ const _exhaustiveCheck = role;
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ }
+ }
+ text += `\n\n`;
+ }
+ text += `ASSISTANT: `;
+ return { text, images };
+ },
+ stopSequences: [`\nUSER:`],
+ };
+ }
+ exports.chat = chat;
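
To illustrate the new `chat()` formatter: each image part becomes an `[img-N]` placeholder in the text while the base64 data is collected in a parallel map. A sketch of the output for a one-image prompt, traced from the code above (the prompt values are made up):

```ts
const { text, images } = chat().format({
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "What is in this picture? " },
        { type: "image", base64Image: "iVBORw0..." }, // truncated placeholder
      ],
    },
  ],
});
// text:   "A chat between a curious user and an artificial intelligence assistant. ...\n\n" +
//         "USER: What is in this picture? [img-1]\n\n" +
//         "ASSISTANT: "
// images: { "1": "iVBORw0..." }
```
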
@@ -1,5 +1,6 @@
- import { MultiModalInstructionPrompt } from "../../model-function/generate-text/prompt-format/InstructionPrompt.js";
  import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
+ import { MultiModalChatPrompt } from "../../model-function/generate-text/prompt-format/ChatPrompt.js";
+ import { MultiModalInstructionPrompt } from "../../model-function/generate-text/prompt-format/InstructionPrompt.js";
  import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
  /**
  * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
@@ -7,3 +8,4 @@ import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
  * @see https://github.com/SkunkworksAI/BakLLaVA
  */
  export declare function instruction(): TextGenerationPromptFormat<MultiModalInstructionPrompt, LlamaCppTextGenerationPrompt>;
+ export declare function chat(): TextGenerationPromptFormat<MultiModalChatPrompt, LlamaCppTextGenerationPrompt>;
@@ -1,3 +1,4 @@
+ import { validateChatPrompt, } from "../../model-function/generate-text/prompt-format/ChatPrompt.js";
  // default Vicuna 1 system message
  const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
  "The assistant gives helpful, detailed, and polite answers to the user's questions.";
@@ -30,6 +31,52 @@ export function instruction() {
  }
  text += `${content}\n`;
  }
+ text += `\nASSISTANT: `;
+ return { text, images };
+ },
+ stopSequences: [`\nUSER:`],
+ };
+ }
+ export function chat() {
+ return {
+ format(prompt) {
+ validateChatPrompt(prompt);
+ let text = "";
+ text += `${prompt.system ?? DEFAULT_SYSTEM_MESSAGE}\n\n`;
+ // construct text and image mapping:
+ let imageCounter = 1;
+ const images = {};
+ for (const { role, content } of prompt.messages) {
+ switch (role) {
+ case "user": {
+ text += `USER: `;
+ for (const part of content) {
+ switch (part.type) {
+ case "text": {
+ text += part.text;
+ break;
+ }
+ case "image": {
+ text += `[img-${imageCounter}]`;
+ images[imageCounter.toString()] = part.base64Image;
+ imageCounter++;
+ break;
+ }
+ }
+ }
+ break;
+ }
+ case "assistant": {
+ text += `ASSISTANT: ${content}`;
+ break;
+ }
+ default: {
+ const _exhaustiveCheck = role;
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ }
+ }
+ text += `\n\n`;
+ }
  text += `ASSISTANT: `;
  return { text, images };
  },
@@ -183,7 +183,7 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
  withChatPrompt(options?: {
  user?: string;
  assistant?: string;
- }): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, OpenAICompletionModelSettings, this>;
+ }): PromptFormatTextStreamingModel<import("../../index.js").TextChatPrompt, string, OpenAICompletionModelSettings, this>;
  withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
  withSettings(additionalSettings: Partial<OpenAICompletionModelSettings>): this;
  }
@@ -51,12 +51,6 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
  writable: true,
  value: "openai"
  });
- Object.defineProperty(this, "maxValuesPerCall", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: 2048
- });
  Object.defineProperty(this, "isParallelizable", {
  enumerable: true,
  configurable: true,
@@ -90,6 +84,9 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
  get modelName() {
  return this.settings.model;
  }
+ get maxValuesPerCall() {
+ return this.settings.maxValuesPerCall ?? 2048;
+ }
  async countTokens(input) {
  return (0, countTokens_js_1.countTokens)(this.tokenizer, input);
  }
@@ -19,6 +19,7 @@ export declare const calculateOpenAIEmbeddingCostInMillicents: ({ model, respons
  }) => number;
  export interface OpenAITextEmbeddingModelSettings extends EmbeddingModelSettings {
  api?: ApiConfiguration;
+ maxValuesPerCall?: number | undefined;
  model: OpenAITextEmbeddingModelType;
  isUserIdForwardingEnabled?: boolean;
  }
@@ -40,7 +41,7 @@ export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEm
  constructor(settings: OpenAITextEmbeddingModelSettings);
  readonly provider: "openai";
  get modelName(): "text-embedding-ada-002";
- readonly maxValuesPerCall = 2048;
+ get maxValuesPerCall(): number;
  readonly isParallelizable = true;
  readonly embeddingDimensions: number;
  readonly tokenizer: TikTokenTokenizer;
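
`maxValuesPerCall` thus changes from a fixed `readonly` 2048 to a settings-backed getter that falls back to 2048 when the setting is omitted. A configuration sketch (the 512 value is arbitrary):

```ts
import { OpenAITextEmbeddingModel } from "modelfusion";

const embeddingModel = new OpenAITextEmbeddingModel({
  model: "text-embedding-ada-002",
  maxValuesPerCall: 512, // arbitrary; omitting it keeps the previous 2048 default
});
```
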
@@ -46,12 +46,6 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
  writable: true,
  value: "openai"
  });
- Object.defineProperty(this, "maxValuesPerCall", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: 2048
- });
  Object.defineProperty(this, "isParallelizable", {
  enumerable: true,
  configurable: true,
@@ -85,6 +79,9 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
  get modelName() {
  return this.settings.model;
  }
+ get maxValuesPerCall() {
+ return this.settings.maxValuesPerCall ?? 2048;
+ }
  async countTokens(input) {
  return countTokens(this.tokenizer, input);
  }
@@ -31,7 +31,7 @@ OpenAIChatSettings> {
  /**
  * Returns this model with a chat prompt format.
  */
- withChatPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptFormat<import("../../../index.js").ChatPrompt, import("./OpenAIChatMessage.js").OpenAIChatMessage[]>>;
+ withChatPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptFormat<import("../../../index.js").TextChatPrompt | import("../../../index.js").MultiModalChatPrompt, import("./OpenAIChatMessage.js").OpenAIChatMessage[]>>;
  withPromptFormat<TARGET_PROMPT_FORMAT extends TextGenerationPromptFormat<unknown, OpenAIChatPrompt>>(promptFormat: TARGET_PROMPT_FORMAT): OpenAIChatFunctionCallStructureGenerationModel<TARGET_PROMPT_FORMAT>;
  withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
  /**
@@ -155,7 +155,7 @@ export declare class OpenAIChatModel extends AbstractOpenAIChatModel<OpenAIChatS
  /**
  * Returns this model with a chat prompt format.
  */
- withChatPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").ChatPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
+ withChatPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").TextChatPrompt | import("../../../index.js").MultiModalChatPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
  withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, OpenAIChatPrompt>): PromptFormatTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt, OpenAIChatSettings, this>;
  withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
  }
@@ -1,5 +1,5 @@
  import { TextGenerationPromptFormat } from "../../../model-function/generate-text/TextGenerationPromptFormat.js";
- import { ChatPrompt } from "../../../model-function/generate-text/prompt-format/ChatPrompt.js";
+ import { MultiModalChatPrompt, TextChatPrompt } from "../../../model-function/generate-text/prompt-format/ChatPrompt.js";
  import { MultiModalInstructionPrompt, TextInstructionPrompt } from "../../../model-function/generate-text/prompt-format/InstructionPrompt.js";
  import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
  /**
@@ -17,4 +17,4 @@ export declare function instruction(): TextGenerationPromptFormat<MultiModalInst
  /**
  * Formats a chat prompt as an OpenAI chat prompt.
  */
- export declare function chat(): TextGenerationPromptFormat<ChatPrompt, Array<OpenAIChatMessage>>;
+ export declare function chat(): TextGenerationPromptFormat<MultiModalChatPrompt | TextChatPrompt, Array<OpenAIChatMessage>>;
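
Since the OpenAI chat prompt format now accepts both shapes, a model wrapped with `withChatPrompt()` can be driven by either a text or a multi-modal chat prompt. A sketch reusing the prompts from the earlier sketch; the model name and the `streamText` call are assumptions about the surrounding library API:

```ts
import { OpenAIChatModel, streamText } from "modelfusion";

const model = new OpenAIChatModel({
  model: "gpt-4-vision-preview", // assumed vision-capable model name
}).withChatPrompt();

// Works with multiModalChat as well as textChat:
const textStream = await streamText(model, multiModalChat);
```
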
@@ -39,7 +39,7 @@ export declare class OpenAICompatibleChatModel extends AbstractOpenAIChatModel<O
  /**
  * Returns this model with a chat prompt format.
  */
- withChatPrompt(): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, OpenAIChatPrompt, OpenAICompatibleChatSettings, this>;
+ withChatPrompt(): PromptFormatTextStreamingModel<import("../../index.js").TextChatPrompt | import("../../index.js").MultiModalChatPrompt, OpenAIChatPrompt, OpenAICompatibleChatSettings, this>;
  withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, OpenAIChatPrompt>): PromptFormatTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt, OpenAICompatibleChatSettings, this>;
  withSettings(additionalSettings: Partial<OpenAICompatibleChatSettings>): this;
  }
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "modelfusion",
  "description": "The TypeScript library for building multi-modal AI applications.",
- "version": "0.87.2",
+ "version": "0.89.0",
  "author": "Lars Grammel",
  "license": "MIT",
  "keywords": [
@@ -74,7 +74,7 @@
  "secure-json-parse": "2.7.0",
  "ws": "8.14.2",
  "zod": "3.22.4",
- "zod-to-json-schema": "3.22.0"
+ "zod-to-json-schema": "3.22.1"
  },
  "devDependencies": {
  "@types/node": "18.11.9",