modelfusion 0.63.0 → 0.65.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/README.md +24 -12
  2. package/model-function/generate-text/index.cjs +8 -8
  3. package/model-function/generate-text/index.d.ts +8 -8
  4. package/model-function/generate-text/index.js +8 -8
  5. package/model-function/generate-text/{AlpacaPromptFormat.d.ts → prompt-format/AlpacaPromptFormat.d.ts} +1 -1
  6. package/model-function/generate-text/{InstructionPrompt.d.ts → prompt-format/InstructionPrompt.d.ts} +13 -0
  7. package/model-function/generate-text/{Llama2PromptFormat.d.ts → prompt-format/Llama2PromptFormat.d.ts} +1 -1
  8. package/model-function/generate-text/{TextPromptFormat.d.ts → prompt-format/TextPromptFormat.d.ts} +1 -1
  9. package/model-function/generate-text/{VicunaPromptFormat.cjs → prompt-format/VicunaPromptFormat.cjs} +4 -2
  10. package/model-function/generate-text/{VicunaPromptFormat.d.ts → prompt-format/VicunaPromptFormat.d.ts} +1 -1
  11. package/model-function/generate-text/{VicunaPromptFormat.js → prompt-format/VicunaPromptFormat.js} +4 -2
  12. package/model-function/generate-text/{trimChatPrompt.d.ts → prompt-format/trimChatPrompt.d.ts} +1 -1
  13. package/model-provider/anthropic/AnthropicPromptFormat.cjs +1 -1
  14. package/model-provider/anthropic/AnthropicPromptFormat.d.ts +2 -2
  15. package/model-provider/anthropic/AnthropicPromptFormat.js +1 -1
  16. package/model-provider/cohere/CohereTextGenerationModel.cjs +1 -1
  17. package/model-provider/cohere/CohereTextGenerationModel.js +1 -1
  18. package/model-provider/llamacpp/index.cjs +1 -0
  19. package/model-provider/llamacpp/index.d.ts +1 -0
  20. package/model-provider/llamacpp/index.js +1 -0
  21. package/model-provider/llamacpp/mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs +36 -0
  22. package/model-provider/llamacpp/mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.d.ts +9 -0
  23. package/model-provider/llamacpp/mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js +32 -0
  24. package/model-provider/openai/OpenAICompletionModel.cjs +1 -1
  25. package/model-provider/openai/OpenAICompletionModel.js +1 -1
  26. package/model-provider/openai/chat/OpenAIChatMessage.cjs +15 -2
  27. package/model-provider/openai/chat/OpenAIChatMessage.d.ts +32 -3
  28. package/model-provider/openai/chat/OpenAIChatMessage.js +15 -2
  29. package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs +7 -13
  30. package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +2 -2
  31. package/model-provider/openai/chat/OpenAIChatPromptFormat.js +7 -13
  32. package/model-provider/openai/chat/countOpenAIChatMessageTokens.cjs +19 -3
  33. package/model-provider/openai/chat/countOpenAIChatMessageTokens.js +19 -3
  34. package/package.json +1 -1
  35. /package/model-function/generate-text/{AlpacaPromptFormat.cjs → prompt-format/AlpacaPromptFormat.cjs} +0 -0
  36. /package/model-function/generate-text/{AlpacaPromptFormat.js → prompt-format/AlpacaPromptFormat.js} +0 -0
  37. /package/model-function/generate-text/{ChatPrompt.cjs → prompt-format/ChatPrompt.cjs} +0 -0
  38. /package/model-function/generate-text/{ChatPrompt.d.ts → prompt-format/ChatPrompt.d.ts} +0 -0
  39. /package/model-function/generate-text/{ChatPrompt.js → prompt-format/ChatPrompt.js} +0 -0
  40. /package/model-function/generate-text/{InstructionPrompt.cjs → prompt-format/InstructionPrompt.cjs} +0 -0
  41. /package/model-function/generate-text/{InstructionPrompt.js → prompt-format/InstructionPrompt.js} +0 -0
  42. /package/model-function/generate-text/{Llama2PromptFormat.cjs → prompt-format/Llama2PromptFormat.cjs} +0 -0
  43. /package/model-function/generate-text/{Llama2PromptFormat.js → prompt-format/Llama2PromptFormat.js} +0 -0
  44. /package/model-function/generate-text/{TextPromptFormat.cjs → prompt-format/TextPromptFormat.cjs} +0 -0
  45. /package/model-function/generate-text/{TextPromptFormat.js → prompt-format/TextPromptFormat.js} +0 -0
  46. /package/model-function/generate-text/{trimChatPrompt.cjs → prompt-format/trimChatPrompt.cjs} +0 -0
  47. /package/model-function/generate-text/{trimChatPrompt.js → prompt-format/trimChatPrompt.js} +0 -0
  48. /package/model-function/generate-text/{validateChatPrompt.cjs → prompt-format/validateChatPrompt.cjs} +0 -0
  49. /package/model-function/generate-text/{validateChatPrompt.d.ts → prompt-format/validateChatPrompt.d.ts} +0 -0
  50. /package/model-function/generate-text/{validateChatPrompt.js → prompt-format/validateChatPrompt.js} +0 -0
package/README.md CHANGED
@@ -14,8 +14,8 @@
 
 **ModelFusion** is a TypeScript library for building AI applications, chatbots, and agents.
 
-- **Vendor-neutral**: ModelFusion is a non-commercial open source project that is community-driven. You can use it with any supported vendor.
-- **Multimodal**: ModelFusion supports a wide range of models including text generation, image generation, text-to-speech, speech-to-text, and embedding models.
+- **Vendor-neutral**: ModelFusion is a non-commercial open source project that is community-driven. You can use it with any supported provider.
+- **Multimodal**: ModelFusion supports a wide range of models including text generation, image generation, vision, text-to-speech, speech-to-text, and embedding models.
 - **Streaming**: ModelFusion supports streaming for many generation models, e.g. text streaming, structure streaming, and full duplex speech streaming.
 - **Utility functions**: ModelFusion provides functionality for tools and tool usage, vector indices, and guard functions.
 - **Type inference and validation**: ModelFusion infers TypeScript types wherever possible and validates model responses.
@@ -47,9 +47,7 @@ You can use [prompt formats](https://modelfusion.dev/guide/function/generate-tex
 
 ```ts
 const text = await generateText(
-  new OpenAICompletionModel({
-    model: "gpt-3.5-turbo-instruct",
-  }),
+  new OpenAICompletionModel({ model: "gpt-3.5-turbo-instruct" }),
   "Write a short story about a robot learning to love:\n\n"
 );
 ```
@@ -60,9 +58,7 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
 
 ```ts
 const textStream = await streamText(
-  new OpenAICompletionModel({
-    model: "gpt-3.5-turbo-instruct",
-  }),
+  new OpenAICompletionModel({ model: "gpt-3.5-turbo-instruct" }),
   "Write a short story about a robot learning to love:\n\n"
 );
 
@@ -73,16 +69,30 @@ for await (const textPart of textStream) {
 
 Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama)
 
+#### streamText with multi-modal prompt
+
+Multi-modal vision models such as GPT 4 Vision can process images as part of the prompt.
+
+```ts
+const textStream = await streamText(
+  new OpenAIChatModel({ model: "gpt-4-vision-preview" }),
+  [
+    OpenAIChatMessage.user("Describe the image in detail:", {
+      image: { base64Content: image, mimeType: "image/png" },
+    }),
+  ]
+);
+```
+
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
+
 ### [Generate Image](https://modelfusion.dev/guide/function/generate-image)
 
 Generate an image from a prompt.
 
 ```ts
 const image = await generateImage(
-  new OpenAIImageGenerationModel({
-    model: "dall-e-3",
-    size: "1024x1024",
-  }),
+  new OpenAIImageGenerationModel({ model: "dall-e-3", size: "1024x1024" }),
   "the wicked witch of the west in the style of early 19th century painting"
 );
 ```
@@ -499,6 +509,8 @@ const textStream = await streamText(
 | Vicuna | ❌ | ✅ |
 | Generic Text | ✅ | ✅ |
 
+#### [Vision Prompts]
+
 #### [Image Generation Prompt Formats](https://modelfusion.dev/guide/function/generate-image/prompt-format)
 
 You can use prompt formats with image models as well, e.g. to use a basic text prompt. It is available as a shorthand method:
package/model-function/generate-text/index.cjs CHANGED
@@ -14,18 +14,18 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./AlpacaPromptFormat.cjs"), exports);
-__exportStar(require("./ChatPrompt.cjs"), exports);
-__exportStar(require("./InstructionPrompt.cjs"), exports);
-__exportStar(require("./Llama2PromptFormat.cjs"), exports);
 __exportStar(require("./PromptFormatTextGenerationModel.cjs"), exports);
 __exportStar(require("./PromptFormatTextStreamingModel.cjs"), exports);
 __exportStar(require("./TextGenerationEvent.cjs"), exports);
 __exportStar(require("./TextGenerationModel.cjs"), exports);
 __exportStar(require("./TextGenerationPromptFormat.cjs"), exports);
-__exportStar(require("./TextPromptFormat.cjs"), exports);
-__exportStar(require("./VicunaPromptFormat.cjs"), exports);
 __exportStar(require("./generateText.cjs"), exports);
+__exportStar(require("./prompt-format/AlpacaPromptFormat.cjs"), exports);
+__exportStar(require("./prompt-format/ChatPrompt.cjs"), exports);
+__exportStar(require("./prompt-format/InstructionPrompt.cjs"), exports);
+__exportStar(require("./prompt-format/Llama2PromptFormat.cjs"), exports);
+__exportStar(require("./prompt-format/TextPromptFormat.cjs"), exports);
+__exportStar(require("./prompt-format/VicunaPromptFormat.cjs"), exports);
+__exportStar(require("./prompt-format/trimChatPrompt.cjs"), exports);
+__exportStar(require("./prompt-format/validateChatPrompt.cjs"), exports);
 __exportStar(require("./streamText.cjs"), exports);
-__exportStar(require("./trimChatPrompt.cjs"), exports);
-__exportStar(require("./validateChatPrompt.cjs"), exports);
package/model-function/generate-text/index.d.ts CHANGED
@@ -1,15 +1,15 @@
-export * from "./AlpacaPromptFormat.js";
-export * from "./ChatPrompt.js";
-export * from "./InstructionPrompt.js";
-export * from "./Llama2PromptFormat.js";
 export * from "./PromptFormatTextGenerationModel.js";
 export * from "./PromptFormatTextStreamingModel.js";
 export * from "./TextGenerationEvent.js";
 export * from "./TextGenerationModel.js";
 export * from "./TextGenerationPromptFormat.js";
-export * from "./TextPromptFormat.js";
-export * from "./VicunaPromptFormat.js";
 export * from "./generateText.js";
+export * from "./prompt-format/AlpacaPromptFormat.js";
+export * from "./prompt-format/ChatPrompt.js";
+export * from "./prompt-format/InstructionPrompt.js";
+export * from "./prompt-format/Llama2PromptFormat.js";
+export * from "./prompt-format/TextPromptFormat.js";
+export * from "./prompt-format/VicunaPromptFormat.js";
+export * from "./prompt-format/trimChatPrompt.js";
+export * from "./prompt-format/validateChatPrompt.js";
 export * from "./streamText.js";
-export * from "./trimChatPrompt.js";
-export * from "./validateChatPrompt.js";
package/model-function/generate-text/index.js CHANGED
@@ -1,15 +1,15 @@
-export * from "./AlpacaPromptFormat.js";
-export * from "./ChatPrompt.js";
-export * from "./InstructionPrompt.js";
-export * from "./Llama2PromptFormat.js";
 export * from "./PromptFormatTextGenerationModel.js";
 export * from "./PromptFormatTextStreamingModel.js";
 export * from "./TextGenerationEvent.js";
 export * from "./TextGenerationModel.js";
 export * from "./TextGenerationPromptFormat.js";
-export * from "./TextPromptFormat.js";
-export * from "./VicunaPromptFormat.js";
 export * from "./generateText.js";
+export * from "./prompt-format/AlpacaPromptFormat.js";
+export * from "./prompt-format/ChatPrompt.js";
+export * from "./prompt-format/InstructionPrompt.js";
+export * from "./prompt-format/Llama2PromptFormat.js";
+export * from "./prompt-format/TextPromptFormat.js";
+export * from "./prompt-format/VicunaPromptFormat.js";
+export * from "./prompt-format/trimChatPrompt.js";
+export * from "./prompt-format/validateChatPrompt.js";
 export * from "./streamText.js";
-export * from "./trimChatPrompt.js";
-export * from "./validateChatPrompt.js";
package/model-function/generate-text/{AlpacaPromptFormat.d.ts → prompt-format/AlpacaPromptFormat.d.ts} CHANGED
@@ -1,5 +1,5 @@
 import { InstructionPrompt } from "./InstructionPrompt.js";
-import { TextGenerationPromptFormat } from "./TextGenerationPromptFormat.js";
+import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
 /**
  * Formats an instruction prompt as an Alpaca prompt.
  *
package/model-function/generate-text/{InstructionPrompt.d.ts → prompt-format/InstructionPrompt.d.ts} CHANGED
@@ -25,4 +25,17 @@ export type InstructionPrompt = {
      * Optional additional input or context, e.g. the content from which information should be extracted.
      */
     input?: string;
+    /**
+     * Optional image to provide context for the language model. Only supported by some models.
+     */
+    image?: {
+        /**
+         * Base-64 encoded image.
+         */
+        base64Content: string;
+        /**
+         * Optional mime type of the image.
+         */
+        mimeType?: string;
+    };
 };
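For reference, a minimal sketch of an instruction prompt that uses the new optional `image` field. The file path and base-64 conversion are illustrative assumptions, not part of the diff:

```ts
import { readFileSync } from "node:fs";
import { InstructionPrompt } from "modelfusion";

// assumed input: any image file, base-64 encoded as the field requires
const base64Content = readFileSync("example.png").toString("base64");

const prompt: InstructionPrompt = {
  instruction: "Describe the image in detail.",
  image: { base64Content, mimeType: "image/png" },
};
```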
package/model-function/generate-text/{Llama2PromptFormat.d.ts → prompt-format/Llama2PromptFormat.d.ts} CHANGED
@@ -1,6 +1,6 @@
 import { ChatPrompt } from "./ChatPrompt.js";
 import { InstructionPrompt } from "./InstructionPrompt.js";
-import { TextGenerationPromptFormat } from "./TextGenerationPromptFormat.js";
+import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
 /**
  * Formats an instruction prompt as a Llama 2 prompt.
  *
package/model-function/generate-text/{TextPromptFormat.d.ts → prompt-format/TextPromptFormat.d.ts} CHANGED
@@ -1,6 +1,6 @@
 import { ChatPrompt } from "./ChatPrompt.js";
 import { InstructionPrompt } from "./InstructionPrompt.js";
-import { TextGenerationPromptFormat } from "./TextGenerationPromptFormat.js";
+import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
 /**
  * Formats an instruction prompt as a basic text prompt.
  */
package/model-function/generate-text/{VicunaPromptFormat.cjs → prompt-format/VicunaPromptFormat.cjs} CHANGED
@@ -2,7 +2,9 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.mapChatPromptToVicunaFormat = void 0;
 const validateChatPrompt_js_1 = require("./validateChatPrompt.cjs");
-const DEFAULT_SYSTEM_PROMPT = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.";
+// default Vicuna 1 system message
+const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
+    "The assistant gives helpful, detailed, and polite answers to the user's questions.";
 /**
  * Formats a chat prompt as a Vicuna prompt.
  *
@@ -32,7 +34,7 @@ function mapChatPromptToVicunaFormat() {
         }
         // first message was not a system message:
         if (i === 0) {
-            text += `${DEFAULT_SYSTEM_PROMPT}\n\n`;
+            text += `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
         }
         // user message
         if ("user" in message) {
package/model-function/generate-text/{VicunaPromptFormat.d.ts → prompt-format/VicunaPromptFormat.d.ts} CHANGED
@@ -1,5 +1,5 @@
 import { ChatPrompt } from "./ChatPrompt.js";
-import { TextGenerationPromptFormat } from "./TextGenerationPromptFormat.js";
+import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
 /**
  * Formats a chat prompt as a Vicuna prompt.
  *
package/model-function/generate-text/{VicunaPromptFormat.js → prompt-format/VicunaPromptFormat.js} CHANGED
@@ -1,5 +1,7 @@
 import { validateChatPrompt } from "./validateChatPrompt.js";
-const DEFAULT_SYSTEM_PROMPT = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.";
+// default Vicuna 1 system message
+const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
+    "The assistant gives helpful, detailed, and polite answers to the user's questions.";
 /**
  * Formats a chat prompt as a Vicuna prompt.
  *
@@ -29,7 +31,7 @@ export function mapChatPromptToVicunaFormat() {
         }
         // first message was not a system message:
         if (i === 0) {
-            text += `${DEFAULT_SYSTEM_PROMPT}\n\n`;
+            text += `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
         }
         // user message
         if ("user" in message) {
package/model-function/generate-text/{trimChatPrompt.d.ts → prompt-format/trimChatPrompt.d.ts} CHANGED
@@ -1,5 +1,5 @@
 import { ChatPrompt } from "./ChatPrompt.js";
-import { HasContextWindowSize, HasTokenizer, TextGenerationModel, TextGenerationModelSettings } from "./TextGenerationModel.js";
+import { HasContextWindowSize, HasTokenizer, TextGenerationModel, TextGenerationModelSettings } from "../TextGenerationModel.js";
 /**
  * Keeps only the most recent messages in the prompt, while leaving enough space for the completion.
  *
package/model-provider/anthropic/AnthropicPromptFormat.cjs CHANGED
@@ -1,7 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.mapChatPromptToAnthropicFormat = exports.mapInstructionPromptToAnthropicFormat = void 0;
-const validateChatPrompt_js_1 = require("../../model-function/generate-text/validateChatPrompt.cjs");
+const validateChatPrompt_js_1 = require("../../model-function/generate-text/prompt-format/validateChatPrompt.cjs");
 /**
  * Formats an instruction prompt as an Anthropic prompt.
  */
package/model-provider/anthropic/AnthropicPromptFormat.d.ts CHANGED
@@ -1,5 +1,5 @@
-import { ChatPrompt } from "../../model-function/generate-text/ChatPrompt.js";
-import { InstructionPrompt } from "../../model-function/generate-text/InstructionPrompt.js";
+import { ChatPrompt } from "../../model-function/generate-text/prompt-format/ChatPrompt.js";
+import { InstructionPrompt } from "../../model-function/generate-text/prompt-format/InstructionPrompt.js";
 import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
 /**
  * Formats an instruction prompt as an Anthropic prompt.
package/model-provider/anthropic/AnthropicPromptFormat.js CHANGED
@@ -1,4 +1,4 @@
-import { validateChatPrompt } from "../../model-function/generate-text/validateChatPrompt.js";
+import { validateChatPrompt } from "../../model-function/generate-text/prompt-format/validateChatPrompt.js";
 /**
  * Formats an instruction prompt as an Anthropic prompt.
  */
package/model-provider/cohere/CohereTextGenerationModel.cjs CHANGED
@@ -6,7 +6,7 @@ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndTh
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
-const TextPromptFormat_js_1 = require("../../model-function/generate-text/TextPromptFormat.cjs");
+const TextPromptFormat_js_1 = require("../../model-function/generate-text/prompt-format/TextPromptFormat.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
 const parseJsonStream_js_1 = require("../../util/streaming/parseJsonStream.cjs");
package/model-provider/cohere/CohereTextGenerationModel.js CHANGED
@@ -3,7 +3,7 @@ import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottl
 import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
-import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/TextPromptFormat.js";
+import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
 import { parseJsonStream } from "../../util/streaming/parseJsonStream.js";
package/model-provider/llamacpp/index.cjs CHANGED
@@ -21,3 +21,4 @@ Object.defineProperty(exports, "LlamaCppError", { enumerable: true, get: functio
 __exportStar(require("./LlamaCppTextEmbeddingModel.cjs"), exports);
 __exportStar(require("./LlamaCppTextGenerationModel.cjs"), exports);
 __exportStar(require("./LlamaCppTokenizer.cjs"), exports);
+__exportStar(require("./mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs"), exports);
package/model-provider/llamacpp/index.d.ts CHANGED
@@ -3,3 +3,4 @@ export { LlamaCppError, LlamaCppErrorData } from "./LlamaCppError.js";
 export * from "./LlamaCppTextEmbeddingModel.js";
 export * from "./LlamaCppTextGenerationModel.js";
 export * from "./LlamaCppTokenizer.js";
+export * from "./mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js";
package/model-provider/llamacpp/index.js CHANGED
@@ -3,3 +3,4 @@ export { LlamaCppError } from "./LlamaCppError.js";
 export * from "./LlamaCppTextEmbeddingModel.js";
 export * from "./LlamaCppTextGenerationModel.js";
 export * from "./LlamaCppTokenizer.js";
+export * from "./mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js";
package/model-provider/llamacpp/mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs ADDED
@@ -0,0 +1,36 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.mapInstructionPromptToBakLLaVA1ForLlamaCppFormat = void 0;
+// default Vicuna 1 system message
+const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
+    "The assistant gives helpful, detailed, and polite answers to the user's questions.";
+/**
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompts.
+ *
+ * @see https://github.com/SkunkworksAI/BakLLaVA
+ */
+function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat() {
+    return {
+        format: (instruction) => {
+            let text = "";
+            text += `${instruction.system ?? DEFAULT_SYSTEM_MESSAGE}\n\n`;
+            text += `USER: `;
+            if (instruction.image != null) {
+                text += `[img-1]\n`;
+            }
+            text += `${instruction.instruction}\n`;
+            if (instruction.input != null) {
+                text += `${instruction.input}\n`;
+            }
+            text += `ASSISTANT: `;
+            return {
+                text,
+                images: instruction.image != null
+                    ? { "1": instruction.image.base64Content }
+                    : undefined,
+            };
+        },
+        stopSequences: [`\nUSER:`],
+    };
+}
+exports.mapInstructionPromptToBakLLaVA1ForLlamaCppFormat = mapInstructionPromptToBakLLaVA1ForLlamaCppFormat;
package/model-provider/llamacpp/mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.d.ts ADDED
@@ -0,0 +1,9 @@
+import { InstructionPrompt } from "../../model-function/generate-text/prompt-format/InstructionPrompt.js";
+import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
+import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
+/**
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompts.
+ *
+ * @see https://github.com/SkunkworksAI/BakLLaVA
+ */
+export declare function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat(): TextGenerationPromptFormat<InstructionPrompt, LlamaCppTextGenerationPrompt>;
package/model-provider/llamacpp/mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js ADDED
@@ -0,0 +1,32 @@
+// default Vicuna 1 system message
+const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
+    "The assistant gives helpful, detailed, and polite answers to the user's questions.";
+/**
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompts.
+ *
+ * @see https://github.com/SkunkworksAI/BakLLaVA
+ */
+export function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat() {
+    return {
+        format: (instruction) => {
+            let text = "";
+            text += `${instruction.system ?? DEFAULT_SYSTEM_MESSAGE}\n\n`;
+            text += `USER: `;
+            if (instruction.image != null) {
+                text += `[img-1]\n`;
+            }
+            text += `${instruction.instruction}\n`;
+            if (instruction.input != null) {
+                text += `${instruction.input}\n`;
+            }
+            text += `ASSISTANT: `;
+            return {
+                text,
+                images: instruction.image != null
+                    ? { "1": instruction.image.base64Content }
+                    : undefined,
+            };
+        },
+        stopSequences: [`\nUSER:`],
+    };
+}
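A possible usage sketch for the new mapping. The example file, the empty model settings, and the use of `withPromptFormat` to attach the format to a llama.cpp model are assumptions based on the surrounding API, not part of the diff:

```ts
import { readFileSync } from "node:fs";
import {
  LlamaCppTextGenerationModel,
  mapInstructionPromptToBakLLaVA1ForLlamaCppFormat,
  streamText,
} from "modelfusion";

// assumed: a llama.cpp server is running a BakLLaVA 1 model
const image = readFileSync("example.png").toString("base64");

const textStream = await streamText(
  new LlamaCppTextGenerationModel({}).withPromptFormat(
    mapInstructionPromptToBakLLaVA1ForLlamaCppFormat()
  ),
  {
    instruction: "Describe the image in detail.",
    image: { base64Content: image, mimeType: "image/png" },
  }
);
```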
package/model-provider/openai/OpenAICompletionModel.cjs CHANGED
@@ -6,7 +6,7 @@ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndTh
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
-const TextPromptFormat_js_1 = require("../../model-function/generate-text/TextPromptFormat.cjs");
+const TextPromptFormat_js_1 = require("../../model-function/generate-text/prompt-format/TextPromptFormat.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
 const parseJSON_js_1 = require("../../util/parseJSON.cjs");
package/model-provider/openai/OpenAICompletionModel.js CHANGED
@@ -3,7 +3,7 @@ import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottl
 import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
-import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/TextPromptFormat.js";
+import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
 import { parseJsonWithZod } from "../../util/parseJSON.js";
package/model-provider/openai/chat/OpenAIChatMessage.cjs CHANGED
@@ -5,8 +5,21 @@ exports.OpenAIChatMessage = {
     system(content) {
         return { role: "system", content };
     },
-    user(content) {
-        return { role: "user", content };
+    user(content, options) {
+        if (options?.image != null) {
+            return {
+                role: "user",
+                content: [
+                    { type: "text", text: content },
+                    {
+                        type: "image_url",
+                        image_url: `data:${options.image.mimeType ?? "image/jpeg"};base64,${options.image.base64Content}`,
+                    },
+                ],
+                name: options.name,
+            };
+        }
+        return { role: "user", content, name: options?.name };
     },
     assistant(content) {
         return { role: "assistant", content };
package/model-provider/openai/chat/OpenAIChatMessage.d.ts CHANGED
@@ -1,14 +1,37 @@
 export type OpenAIChatMessage = {
-    role: "user" | "assistant" | "system";
+    role: "system";
     content: string;
     name?: string;
+} | {
+    role: "user";
+    content: string | Array<{
+        type: "text";
+        text: string;
+    } | {
+        type: "image_url";
+        image_url: string;
+    }>;
+    name?: string;
 } | {
     role: "assistant";
     content: string | null;
-    function_call: {
+    name?: string;
+    tool_calls?: Array<{
+        id: string;
+        type: "function";
+        function: {
+            name: string;
+            arguments: string;
+        };
+    }>;
+    function_call?: {
         name: string;
         arguments: string;
     };
+} | {
+    role: "tool";
+    tool_call_id: string;
+    content: string | null;
 } | {
     role: "function";
     content: string;
@@ -16,7 +39,13 @@ export type OpenAIChatMessage = {
 };
 export declare const OpenAIChatMessage: {
     system(content: string): OpenAIChatMessage;
-    user(content: string): OpenAIChatMessage;
+    user(content: string, options?: {
+        name?: string;
+        image?: {
+            base64Content: string;
+            mimeType?: string;
+        };
+    }): OpenAIChatMessage;
    assistant(content: string): OpenAIChatMessage;
    functionCall(content: string | null, functionCall: {
        name: string;
package/model-provider/openai/chat/OpenAIChatMessage.js CHANGED
@@ -2,8 +2,21 @@ export const OpenAIChatMessage = {
     system(content) {
         return { role: "system", content };
     },
-    user(content) {
-        return { role: "user", content };
+    user(content, options) {
+        if (options?.image != null) {
+            return {
+                role: "user",
+                content: [
+                    { type: "text", text: content },
+                    {
+                        type: "image_url",
+                        image_url: `data:${options.image.mimeType ?? "image/jpeg"};base64,${options.image.base64Content}`,
+                    },
+                ],
+                name: options.name,
+            };
+        }
+        return { role: "user", content, name: options?.name };
     },
     assistant(content) {
         return { role: "assistant", content };
package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs CHANGED
@@ -1,7 +1,8 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.mapChatPromptToOpenAIChatFormat = exports.mapInstructionPromptToOpenAIChatFormat = void 0;
-const validateChatPrompt_js_1 = require("../../../model-function/generate-text/validateChatPrompt.cjs");
+const validateChatPrompt_js_1 = require("../../../model-function/generate-text/prompt-format/validateChatPrompt.cjs");
+const OpenAIChatMessage_js_1 = require("./OpenAIChatMessage.cjs");
 /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
@@ -10,20 +11,13 @@ function mapInstructionPromptToOpenAIChatFormat() {
         format: (instruction) => {
             const messages = [];
             if (instruction.system != null) {
-                messages.push({
-                    role: "system",
-                    content: instruction.system,
-                });
+                messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.system(instruction.system));
             }
-            messages.push({
-                role: "user",
-                content: instruction.instruction,
-            });
+            messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.user(instruction.instruction, {
+                image: instruction.image,
+            }));
             if (instruction.input != null) {
-                messages.push({
-                    role: "user",
-                    content: instruction.input,
-                });
+                messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.user(instruction.input));
             }
             return messages;
         },
package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts CHANGED
@@ -1,6 +1,6 @@
-import { ChatPrompt } from "../../../model-function/generate-text/ChatPrompt.js";
-import { InstructionPrompt } from "../../../model-function/generate-text/InstructionPrompt.js";
 import { TextGenerationPromptFormat } from "../../../model-function/generate-text/TextGenerationPromptFormat.js";
+import { ChatPrompt } from "../../../model-function/generate-text/prompt-format/ChatPrompt.js";
+import { InstructionPrompt } from "../../../model-function/generate-text/prompt-format/InstructionPrompt.js";
 import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
 /**
  * Formats an instruction prompt as an OpenAI chat prompt.
package/model-provider/openai/chat/OpenAIChatPromptFormat.js CHANGED
@@ -1,4 +1,5 @@
-import { validateChatPrompt } from "../../../model-function/generate-text/validateChatPrompt.js";
+import { validateChatPrompt } from "../../../model-function/generate-text/prompt-format/validateChatPrompt.js";
+import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
 /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
@@ -7,20 +8,13 @@ export function mapInstructionPromptToOpenAIChatFormat() {
         format: (instruction) => {
             const messages = [];
             if (instruction.system != null) {
-                messages.push({
-                    role: "system",
-                    content: instruction.system,
-                });
+                messages.push(OpenAIChatMessage.system(instruction.system));
             }
-            messages.push({
-                role: "user",
-                content: instruction.instruction,
-            });
+            messages.push(OpenAIChatMessage.user(instruction.instruction, {
+                image: instruction.image,
+            }));
             if (instruction.input != null) {
-                messages.push({
-                    role: "user",
-                    content: instruction.input,
-                });
+                messages.push(OpenAIChatMessage.user(instruction.input));
             }
             return messages;
         },
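The net effect of this refactoring is that an instruction prompt with an image now flows through `OpenAIChatMessage.user` into the vision content format. A sketch, with the file path and prompt values as assumptions:

```ts
import { readFileSync } from "node:fs";
import {
  OpenAIChatModel,
  mapInstructionPromptToOpenAIChatFormat,
  streamText,
} from "modelfusion";

const image = readFileSync("example.png").toString("base64");

const textStream = await streamText(
  new OpenAIChatModel({ model: "gpt-4-vision-preview" }).withPromptFormat(
    mapInstructionPromptToOpenAIChatFormat()
  ),
  {
    instruction: "Describe the image in detail.",
    image: { base64Content: image, mimeType: "image/png" },
  }
);
```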
package/model-provider/openai/chat/countOpenAIChatMessageTokens.cjs CHANGED
@@ -15,10 +15,26 @@ exports.OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT = 2;
  */
 exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT = 5;
 async function countOpenAIChatMessageTokens({ message, model, }) {
-    const contentTokenCount = await (0, countTokens_js_1.countTokens)(new TikTokenTokenizer_js_1.TikTokenTokenizer({
+    const tokenizer = new TikTokenTokenizer_js_1.TikTokenTokenizer({
         model: (0, OpenAIChatModel_js_1.getOpenAIChatModelInformation)(model).baseModel,
-    }), message.content ?? "");
-    return exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT + contentTokenCount;
+    });
+    // case: function call without content
+    if (message.content == null) {
+        return exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT;
+    }
+    // case: simple text content
+    if (typeof message.content === "string") {
+        return (exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT +
+            (await (0, countTokens_js_1.countTokens)(tokenizer, message.content)));
+    }
+    // case: array of content objects
+    let contentTokenCount = exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT;
+    for (const content of message.content) {
+        if (content.type === "text") {
+            contentTokenCount += await (0, countTokens_js_1.countTokens)(tokenizer, content.text);
+        }
+    }
+    return contentTokenCount;
 }
 exports.countOpenAIChatMessageTokens = countOpenAIChatMessageTokens;
 async function countOpenAIChatPromptTokens({ messages, model, }) {
package/model-provider/openai/chat/countOpenAIChatMessageTokens.js CHANGED
@@ -12,10 +12,26 @@ export const OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT = 2;
  */
 export const OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT = 5;
 export async function countOpenAIChatMessageTokens({ message, model, }) {
-    const contentTokenCount = await countTokens(new TikTokenTokenizer({
+    const tokenizer = new TikTokenTokenizer({
         model: getOpenAIChatModelInformation(model).baseModel,
-    }), message.content ?? "");
-    return OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT + contentTokenCount;
+    });
+    // case: function call without content
+    if (message.content == null) {
+        return OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT;
+    }
+    // case: simple text content
+    if (typeof message.content === "string") {
+        return (OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT +
+            (await countTokens(tokenizer, message.content)));
+    }
+    // case: array of content objects
+    let contentTokenCount = OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT;
+    for (const content of message.content) {
+        if (content.type === "text") {
+            contentTokenCount += await countTokens(tokenizer, content.text);
+        }
+    }
+    return contentTokenCount;
 }
 export async function countOpenAIChatPromptTokens({ messages, model, }) {
     let tokens = OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT;
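A sketch of how the reworked counting behaves, assuming the helpers are importable from the package root. Note that image parts are skipped, so only text parts contribute to the count:

```ts
import { countOpenAIChatMessageTokens, OpenAIChatMessage } from "modelfusion";

// string content: 5 base tokens plus the tokenized content
const textTokens = await countOpenAIChatMessageTokens({
  message: OpenAIChatMessage.user("Hello world"),
  model: "gpt-3.5-turbo",
});

// array content (text + image): only the text part is tokenized
const visionTokens = await countOpenAIChatMessageTokens({
  message: OpenAIChatMessage.user("Describe the image:", {
    image: { base64Content: "...", mimeType: "image/png" },
  }),
  model: "gpt-4",
});
```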
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
-  "version": "0.63.0",
+  "version": "0.65.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [