modelfusion 0.63.0 → 0.64.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/model-function/generate-text/index.cjs +9 -8
  2. package/model-function/generate-text/index.d.ts +9 -8
  3. package/model-function/generate-text/index.js +9 -8
  4. package/model-function/generate-text/{AlpacaPromptFormat.d.ts → prompt-format/AlpacaPromptFormat.d.ts} +1 -1
  5. package/model-function/generate-text/{Llama2PromptFormat.d.ts → prompt-format/Llama2PromptFormat.d.ts} +1 -1
  6. package/model-function/generate-text/{TextPromptFormat.d.ts → prompt-format/TextPromptFormat.d.ts} +1 -1
  7. package/model-function/generate-text/{VicunaPromptFormat.d.ts → prompt-format/VicunaPromptFormat.d.ts} +1 -1
  8. package/model-function/generate-text/prompt-format/VisionInstructionPrompt.cjs +2 -0
  9. package/model-function/generate-text/prompt-format/VisionInstructionPrompt.d.ts +31 -0
  10. package/model-function/generate-text/prompt-format/VisionInstructionPrompt.js +1 -0
  11. package/model-function/generate-text/{trimChatPrompt.d.ts → prompt-format/trimChatPrompt.d.ts} +1 -1
  12. package/model-provider/anthropic/AnthropicPromptFormat.cjs +1 -1
  13. package/model-provider/anthropic/AnthropicPromptFormat.d.ts +2 -2
  14. package/model-provider/anthropic/AnthropicPromptFormat.js +1 -1
  15. package/model-provider/cohere/CohereTextGenerationModel.cjs +1 -1
  16. package/model-provider/cohere/CohereTextGenerationModel.js +1 -1
  17. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +4 -0
  18. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +1 -0
  19. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +4 -0
  20. package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.cjs +15 -0
  21. package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.d.ts +4 -0
  22. package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.js +11 -0
  23. package/model-provider/openai/OpenAICompletionModel.cjs +1 -1
  24. package/model-provider/openai/OpenAICompletionModel.js +1 -1
  25. package/model-provider/openai/chat/OpenAIChatMessage.d.ts +25 -2
  26. package/model-provider/openai/chat/OpenAIChatModel.cjs +3 -0
  27. package/model-provider/openai/chat/OpenAIChatModel.d.ts +1 -0
  28. package/model-provider/openai/chat/OpenAIChatModel.js +4 -1
  29. package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs +25 -2
  30. package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +7 -2
  31. package/model-provider/openai/chat/OpenAIChatPromptFormat.js +23 -1
  32. package/model-provider/openai/chat/countOpenAIChatMessageTokens.cjs +19 -3
  33. package/model-provider/openai/chat/countOpenAIChatMessageTokens.js +19 -3
  34. package/package.json +1 -1
  35. /package/model-function/generate-text/{AlpacaPromptFormat.cjs → prompt-format/AlpacaPromptFormat.cjs} +0 -0
  36. /package/model-function/generate-text/{AlpacaPromptFormat.js → prompt-format/AlpacaPromptFormat.js} +0 -0
  37. /package/model-function/generate-text/{ChatPrompt.cjs → prompt-format/ChatPrompt.cjs} +0 -0
  38. /package/model-function/generate-text/{ChatPrompt.d.ts → prompt-format/ChatPrompt.d.ts} +0 -0
  39. /package/model-function/generate-text/{ChatPrompt.js → prompt-format/ChatPrompt.js} +0 -0
  40. /package/model-function/generate-text/{InstructionPrompt.cjs → prompt-format/InstructionPrompt.cjs} +0 -0
  41. /package/model-function/generate-text/{InstructionPrompt.d.ts → prompt-format/InstructionPrompt.d.ts} +0 -0
  42. /package/model-function/generate-text/{InstructionPrompt.js → prompt-format/InstructionPrompt.js} +0 -0
  43. /package/model-function/generate-text/{Llama2PromptFormat.cjs → prompt-format/Llama2PromptFormat.cjs} +0 -0
  44. /package/model-function/generate-text/{Llama2PromptFormat.js → prompt-format/Llama2PromptFormat.js} +0 -0
  45. /package/model-function/generate-text/{TextPromptFormat.cjs → prompt-format/TextPromptFormat.cjs} +0 -0
  46. /package/model-function/generate-text/{TextPromptFormat.js → prompt-format/TextPromptFormat.js} +0 -0
  47. /package/model-function/generate-text/{VicunaPromptFormat.cjs → prompt-format/VicunaPromptFormat.cjs} +0 -0
  48. /package/model-function/generate-text/{VicunaPromptFormat.js → prompt-format/VicunaPromptFormat.js} +0 -0
  49. /package/model-function/generate-text/{trimChatPrompt.cjs → prompt-format/trimChatPrompt.cjs} +0 -0
  50. /package/model-function/generate-text/{trimChatPrompt.js → prompt-format/trimChatPrompt.js} +0 -0
  51. /package/model-function/generate-text/{validateChatPrompt.cjs → prompt-format/validateChatPrompt.cjs} +0 -0
  52. /package/model-function/generate-text/{validateChatPrompt.d.ts → prompt-format/validateChatPrompt.d.ts} +0 -0
  53. /package/model-function/generate-text/{validateChatPrompt.js → prompt-format/validateChatPrompt.js} +0 -0
package/model-function/generate-text/index.cjs CHANGED
@@ -14,18 +14,19 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./AlpacaPromptFormat.cjs"), exports);
-__exportStar(require("./ChatPrompt.cjs"), exports);
-__exportStar(require("./InstructionPrompt.cjs"), exports);
-__exportStar(require("./Llama2PromptFormat.cjs"), exports);
 __exportStar(require("./PromptFormatTextGenerationModel.cjs"), exports);
 __exportStar(require("./PromptFormatTextStreamingModel.cjs"), exports);
 __exportStar(require("./TextGenerationEvent.cjs"), exports);
 __exportStar(require("./TextGenerationModel.cjs"), exports);
 __exportStar(require("./TextGenerationPromptFormat.cjs"), exports);
-__exportStar(require("./TextPromptFormat.cjs"), exports);
-__exportStar(require("./VicunaPromptFormat.cjs"), exports);
 __exportStar(require("./generateText.cjs"), exports);
+__exportStar(require("./prompt-format/AlpacaPromptFormat.cjs"), exports);
+__exportStar(require("./prompt-format/ChatPrompt.cjs"), exports);
+__exportStar(require("./prompt-format/InstructionPrompt.cjs"), exports);
+__exportStar(require("./prompt-format/Llama2PromptFormat.cjs"), exports);
+__exportStar(require("./prompt-format/TextPromptFormat.cjs"), exports);
+__exportStar(require("./prompt-format/VicunaPromptFormat.cjs"), exports);
+__exportStar(require("./prompt-format/VisionInstructionPrompt.cjs"), exports);
+__exportStar(require("./prompt-format/trimChatPrompt.cjs"), exports);
+__exportStar(require("./prompt-format/validateChatPrompt.cjs"), exports);
 __exportStar(require("./streamText.cjs"), exports);
-__exportStar(require("./trimChatPrompt.cjs"), exports);
-__exportStar(require("./validateChatPrompt.cjs"), exports);
package/model-function/generate-text/index.d.ts CHANGED
@@ -1,15 +1,16 @@
-export * from "./AlpacaPromptFormat.js";
-export * from "./ChatPrompt.js";
-export * from "./InstructionPrompt.js";
-export * from "./Llama2PromptFormat.js";
 export * from "./PromptFormatTextGenerationModel.js";
 export * from "./PromptFormatTextStreamingModel.js";
 export * from "./TextGenerationEvent.js";
 export * from "./TextGenerationModel.js";
 export * from "./TextGenerationPromptFormat.js";
-export * from "./TextPromptFormat.js";
-export * from "./VicunaPromptFormat.js";
 export * from "./generateText.js";
+export * from "./prompt-format/AlpacaPromptFormat.js";
+export * from "./prompt-format/ChatPrompt.js";
+export * from "./prompt-format/InstructionPrompt.js";
+export * from "./prompt-format/Llama2PromptFormat.js";
+export * from "./prompt-format/TextPromptFormat.js";
+export * from "./prompt-format/VicunaPromptFormat.js";
+export * from "./prompt-format/VisionInstructionPrompt.js";
+export * from "./prompt-format/trimChatPrompt.js";
+export * from "./prompt-format/validateChatPrompt.js";
 export * from "./streamText.js";
-export * from "./trimChatPrompt.js";
-export * from "./validateChatPrompt.js";
package/model-function/generate-text/index.js CHANGED
@@ -1,15 +1,16 @@
-export * from "./AlpacaPromptFormat.js";
-export * from "./ChatPrompt.js";
-export * from "./InstructionPrompt.js";
-export * from "./Llama2PromptFormat.js";
 export * from "./PromptFormatTextGenerationModel.js";
 export * from "./PromptFormatTextStreamingModel.js";
 export * from "./TextGenerationEvent.js";
 export * from "./TextGenerationModel.js";
 export * from "./TextGenerationPromptFormat.js";
-export * from "./TextPromptFormat.js";
-export * from "./VicunaPromptFormat.js";
 export * from "./generateText.js";
+export * from "./prompt-format/AlpacaPromptFormat.js";
+export * from "./prompt-format/ChatPrompt.js";
+export * from "./prompt-format/InstructionPrompt.js";
+export * from "./prompt-format/Llama2PromptFormat.js";
+export * from "./prompt-format/TextPromptFormat.js";
+export * from "./prompt-format/VicunaPromptFormat.js";
+export * from "./prompt-format/VisionInstructionPrompt.js";
+export * from "./prompt-format/trimChatPrompt.js";
+export * from "./prompt-format/validateChatPrompt.js";
 export * from "./streamText.js";
-export * from "./trimChatPrompt.js";
-export * from "./validateChatPrompt.js";
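The three index files above drop the flat re-exports in favor of the new prompt-format/ directory, but every symbol is still re-exported from the package root, so top-level imports keep working. A minimal sketch of the practical impact (the deep-import specifiers are hypothetical consumer code, not part of the package):

```ts
// Unaffected: root imports resolve through the updated index files.
import { trimChatPrompt, validateChatPrompt } from "modelfusion";

// Affected (hypothetical deep imports): the path gains a prompt-format/ segment.
// before: ".../model-function/generate-text/trimChatPrompt.js"
// after:  ".../model-function/generate-text/prompt-format/trimChatPrompt.js"
```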
package/model-function/generate-text/{AlpacaPromptFormat.d.ts → prompt-format/AlpacaPromptFormat.d.ts} RENAMED
@@ -1,5 +1,5 @@
 import { InstructionPrompt } from "./InstructionPrompt.js";
-import { TextGenerationPromptFormat } from "./TextGenerationPromptFormat.js";
+import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
 /**
  * Formats an instruction prompt as an Alpaca prompt.
  *
package/model-function/generate-text/{Llama2PromptFormat.d.ts → prompt-format/Llama2PromptFormat.d.ts} RENAMED
@@ -1,6 +1,6 @@
 import { ChatPrompt } from "./ChatPrompt.js";
 import { InstructionPrompt } from "./InstructionPrompt.js";
-import { TextGenerationPromptFormat } from "./TextGenerationPromptFormat.js";
+import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
 /**
  * Formats an instruction prompt as a Llama 2 prompt.
  *
package/model-function/generate-text/{TextPromptFormat.d.ts → prompt-format/TextPromptFormat.d.ts} RENAMED
@@ -1,6 +1,6 @@
 import { ChatPrompt } from "./ChatPrompt.js";
 import { InstructionPrompt } from "./InstructionPrompt.js";
-import { TextGenerationPromptFormat } from "./TextGenerationPromptFormat.js";
+import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
 /**
  * Formats an instruction prompt as a basic text prompt.
  */
package/model-function/generate-text/{VicunaPromptFormat.d.ts → prompt-format/VicunaPromptFormat.d.ts} RENAMED
@@ -1,5 +1,5 @@
 import { ChatPrompt } from "./ChatPrompt.js";
-import { TextGenerationPromptFormat } from "./TextGenerationPromptFormat.js";
+import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
 /**
  * Formats a chat prompt as a Vicuna prompt.
  *
package/model-function/generate-text/prompt-format/VisionInstructionPrompt.cjs ADDED
@@ -0,0 +1,2 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
package/model-function/generate-text/prompt-format/VisionInstructionPrompt.d.ts ADDED
@@ -0,0 +1,31 @@
+/**
+ * A single instruction version prompt. It contains an instruction, a base64 encoded image
+ * and an optional mime type of the image.
+ *
+ * If no mime type is provided, the mime type default to "image/jpeg".
+ *
+ * @example
+ * ```ts
+ * {
+ *   instruction: "Describe the image in detail:",
+ *   image: fs.readFileSync(path.join("data", "example-image.png"), {
+ *     encoding: "base64",
+ *   }),
+ *   mimeType: "image/png"
+ * }
+ * ```
+ */
+export type VisionInstructionPrompt = {
+    /**
+     * The instruction for the model.
+     */
+    instruction: string;
+    /**
+     * Base-64 encoded image.
+     */
+    image: string;
+    /**
+     * Optional mime type of the image.
+     */
+    mimeType?: string;
+};
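The @example in the added doc comment translates directly into a value of the new type. A minimal sketch, assuming a local example-image.png (the file path is a placeholder):

```ts
import fs from "node:fs";
import path from "node:path";
import type { VisionInstructionPrompt } from "modelfusion";

// Shape introduced in this release: instruction + base64 image + optional mime type.
const prompt: VisionInstructionPrompt = {
  instruction: "Describe the image in detail:",
  image: fs.readFileSync(path.join("data", "example-image.png"), {
    encoding: "base64", // the type expects a base64-encoded string
  }),
  mimeType: "image/png", // omit to fall back to "image/jpeg"
};
```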
package/model-function/generate-text/{trimChatPrompt.d.ts → prompt-format/trimChatPrompt.d.ts} RENAMED
@@ -1,5 +1,5 @@
 import { ChatPrompt } from "./ChatPrompt.js";
-import { HasContextWindowSize, HasTokenizer, TextGenerationModel, TextGenerationModelSettings } from "./TextGenerationModel.js";
+import { HasContextWindowSize, HasTokenizer, TextGenerationModel, TextGenerationModelSettings } from "../TextGenerationModel.js";
 /**
  * Keeps only the most recent messages in the prompt, while leaving enough space for the completion.
  *
package/model-provider/anthropic/AnthropicPromptFormat.cjs CHANGED
@@ -1,7 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.mapChatPromptToAnthropicFormat = exports.mapInstructionPromptToAnthropicFormat = void 0;
-const validateChatPrompt_js_1 = require("../../model-function/generate-text/validateChatPrompt.cjs");
+const validateChatPrompt_js_1 = require("../../model-function/generate-text/prompt-format/validateChatPrompt.cjs");
 /**
  * Formats an instruction prompt as an Anthropic prompt.
  */
package/model-provider/anthropic/AnthropicPromptFormat.d.ts CHANGED
@@ -1,5 +1,5 @@
-import { ChatPrompt } from "../../model-function/generate-text/ChatPrompt.js";
-import { InstructionPrompt } from "../../model-function/generate-text/InstructionPrompt.js";
+import { ChatPrompt } from "../../model-function/generate-text/prompt-format/ChatPrompt.js";
+import { InstructionPrompt } from "../../model-function/generate-text/prompt-format/InstructionPrompt.js";
 import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
 /**
  * Formats an instruction prompt as an Anthropic prompt.
package/model-provider/anthropic/AnthropicPromptFormat.js CHANGED
@@ -1,4 +1,4 @@
-import { validateChatPrompt } from "../../model-function/generate-text/validateChatPrompt.js";
+import { validateChatPrompt } from "../../model-function/generate-text/prompt-format/validateChatPrompt.js";
 /**
  * Formats an instruction prompt as an Anthropic prompt.
  */
package/model-provider/cohere/CohereTextGenerationModel.cjs CHANGED
@@ -6,7 +6,7 @@ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndTh
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
-const TextPromptFormat_js_1 = require("../../model-function/generate-text/TextPromptFormat.cjs");
+const TextPromptFormat_js_1 = require("../../model-function/generate-text/prompt-format/TextPromptFormat.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
 const parseJsonStream_js_1 = require("../../util/streaming/parseJsonStream.cjs");
package/model-provider/cohere/CohereTextGenerationModel.js CHANGED
@@ -3,7 +3,7 @@ import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottl
 import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
-import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/TextPromptFormat.js";
+import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
 import { parseJsonStream } from "../../util/streaming/parseJsonStream.js";
package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs CHANGED
@@ -12,6 +12,7 @@ const parseJSON_js_1 = require("../../util/parseJSON.cjs");
 const LlamaCppApiConfiguration_js_1 = require("./LlamaCppApiConfiguration.cjs");
 const LlamaCppError_js_1 = require("./LlamaCppError.cjs");
 const LlamaCppTokenizer_js_1 = require("./LlamaCppTokenizer.cjs");
+const mapVisionInstructionPromptToLlamaCppFormat_js_1 = require("./mapVisionInstructionPromptToLlamaCppFormat.cjs");
 class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
     constructor(settings = {}) {
         super({ settings });
@@ -107,6 +108,9 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
             stopSequences: [],
         });
     }
+    withVisionInstructionPrompt() {
+        return this.withPromptFormat((0, mapVisionInstructionPromptToLlamaCppFormat_js_1.mapVisionInstructionPromptToLlamaCppFormat)());
+    }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
             model: this.withSettings({
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts CHANGED
@@ -111,6 +111,7 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
     }>;
     doStreamText(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
     withTextPrompt(): PromptFormatTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withVisionInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").VisionInstructionPrompt, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, LlamaCppTextGenerationPrompt>): PromptFormatTextStreamingModel<INPUT_PROMPT, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
 }
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js CHANGED
@@ -9,6 +9,7 @@ import { parseJsonWithZod } from "../../util/parseJSON.js";
 import { LlamaCppApiConfiguration } from "./LlamaCppApiConfiguration.js";
 import { failedLlamaCppCallResponseHandler } from "./LlamaCppError.js";
 import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
+import { mapVisionInstructionPromptToLlamaCppFormat } from "./mapVisionInstructionPromptToLlamaCppFormat.js";
 export class LlamaCppTextGenerationModel extends AbstractModel {
     constructor(settings = {}) {
         super({ settings });
@@ -104,6 +105,9 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
             stopSequences: [],
         });
     }
+    withVisionInstructionPrompt() {
+        return this.withPromptFormat(mapVisionInstructionPromptToLlamaCppFormat());
+    }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel({
             model: this.withSettings({
package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.cjs ADDED
@@ -0,0 +1,15 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.mapVisionInstructionPromptToLlamaCppFormat = void 0;
+function mapVisionInstructionPromptToLlamaCppFormat() {
+    return {
+        format: ({ instruction, image }) => {
+            return {
+                text: `[img-1]\n\n${instruction}`,
+                images: { "1": image },
+            };
+        },
+        stopSequences: [],
+    };
+}
+exports.mapVisionInstructionPromptToLlamaCppFormat = mapVisionInstructionPromptToLlamaCppFormat;
package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.d.ts ADDED
@@ -0,0 +1,4 @@
+import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
+import { VisionInstructionPrompt } from "../../model-function/generate-text/prompt-format/VisionInstructionPrompt.js";
+import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
+export declare function mapVisionInstructionPromptToLlamaCppFormat(): TextGenerationPromptFormat<VisionInstructionPrompt, LlamaCppTextGenerationPrompt>;
package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.js ADDED
@@ -0,0 +1,11 @@
+export function mapVisionInstructionPromptToLlamaCppFormat() {
+    return {
+        format: ({ instruction, image }) => {
+            return {
+                text: `[img-1]\n\n${instruction}`,
+                images: { "1": image },
+            };
+        },
+        stopSequences: [],
+    };
+}
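With the mapping above and the new withVisionInstructionPrompt() method, a multimodal llama.cpp server (e.g. running LLaVA) can be driven with a VisionInstructionPrompt. A minimal usage sketch, assuming a vision-capable llama.cpp server on the default endpoint; base64Image stands in for real image data:

```ts
import { LlamaCppTextGenerationModel, generateText } from "modelfusion";

const model = new LlamaCppTextGenerationModel({
  maxCompletionTokens: 512,
}).withVisionInstructionPrompt(); // new in 0.64.0

// The format produces { text: "[img-1]\n\n<instruction>", images: { "1": <image> } }:
const description = await generateText(model, {
  instruction: "Describe the image in detail:",
  image: base64Image, // assumed base64-encoded image string, defined elsewhere
});
```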
package/model-provider/openai/OpenAICompletionModel.cjs CHANGED
@@ -6,7 +6,7 @@ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndTh
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
-const TextPromptFormat_js_1 = require("../../model-function/generate-text/TextPromptFormat.cjs");
+const TextPromptFormat_js_1 = require("../../model-function/generate-text/prompt-format/TextPromptFormat.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
 const parseJSON_js_1 = require("../../util/parseJSON.cjs");
package/model-provider/openai/OpenAICompletionModel.js CHANGED
@@ -3,7 +3,7 @@ import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottl
 import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
-import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/TextPromptFormat.js";
+import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
 import { parseJsonWithZod } from "../../util/parseJSON.js";
package/model-provider/openai/chat/OpenAIChatMessage.d.ts CHANGED
@@ -1,14 +1,37 @@
 export type OpenAIChatMessage = {
-    role: "user" | "assistant" | "system";
+    role: "system";
     content: string;
     name?: string;
+} | {
+    role: "user";
+    content: string | Array<{
+        type: "text";
+        text: string;
+    } | {
+        type: "image_url";
+        image_url: string;
+    }>;
+    name?: string;
 } | {
     role: "assistant";
     content: string | null;
-    function_call: {
+    name?: string;
+    tool_calls?: Array<{
+        id: string;
+        type: "function";
+        function: {
+            name: string;
+            arguments: string;
+        };
+    }>;
+    function_call?: {
         name: string;
         arguments: string;
     };
+} | {
+    role: "tool";
+    tool_call_id: string;
+    content: string | null;
 } | {
     role: "function";
     content: string;
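The widened union above is what makes image input expressible: the user variant now accepts an array of text and image_url parts in addition to a plain string. For illustration, a message value that fits the new user variant (the data URL is truncated; whether OpenAIChatMessage is re-exported from the package root is an assumption):

```ts
import type { OpenAIChatMessage } from "modelfusion";

const message: OpenAIChatMessage = {
  role: "user",
  content: [
    { type: "text", text: "What is in this picture?" },
    { type: "image_url", image_url: "data:image/jpeg;base64,..." },
  ],
};
```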
package/model-provider/openai/chat/OpenAIChatModel.cjs CHANGED
@@ -364,6 +364,9 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
     withInstructionPrompt() {
         return this.withPromptFormat((0, OpenAIChatPromptFormat_js_1.mapInstructionPromptToOpenAIChatFormat)());
     }
+    withVisionInstructionPrompt() {
+        return this.withPromptFormat((0, OpenAIChatPromptFormat_js_1.mapVisionInstructionPromptToOpenAIChatFormat)());
+    }
     /**
      * Returns this model with a chat prompt format.
      */
package/model-provider/openai/chat/OpenAIChatModel.d.ts CHANGED
@@ -334,6 +334,7 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
      * Returns this model with an instruction prompt format.
      */
     withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").InstructionPrompt, OpenAIChatMessage[], OpenAIChatSettings, this>;
+    withVisionInstructionPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").VisionInstructionPrompt, OpenAIChatMessage[], OpenAIChatSettings, this>;
     /**
      * Returns this model with a chat prompt format.
      */
package/model-provider/openai/chat/OpenAIChatModel.js CHANGED
@@ -9,7 +9,7 @@ import { PromptFormatTextStreamingModel } from "../../../model-function/generate
 import { OpenAIApiConfiguration } from "../OpenAIApiConfiguration.js";
 import { failedOpenAICallResponseHandler } from "../OpenAIError.js";
 import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
-import { mapChatPromptToOpenAIChatFormat, mapInstructionPromptToOpenAIChatFormat, } from "./OpenAIChatPromptFormat.js";
+import { mapChatPromptToOpenAIChatFormat, mapInstructionPromptToOpenAIChatFormat, mapVisionInstructionPromptToOpenAIChatFormat, } from "./OpenAIChatPromptFormat.js";
 import { createOpenAIChatDeltaIterableQueue } from "./OpenAIChatStreamIterable.js";
 import { countOpenAIChatPromptTokens } from "./countOpenAIChatMessageTokens.js";
 /*
@@ -355,6 +355,9 @@ export class OpenAIChatModel extends AbstractModel {
     withInstructionPrompt() {
         return this.withPromptFormat(mapInstructionPromptToOpenAIChatFormat());
     }
+    withVisionInstructionPrompt() {
+        return this.withPromptFormat(mapVisionInstructionPromptToOpenAIChatFormat());
+    }
     /**
      * Returns this model with a chat prompt format.
      */
package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs CHANGED
@@ -1,7 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.mapChatPromptToOpenAIChatFormat = exports.mapInstructionPromptToOpenAIChatFormat = void 0;
-const validateChatPrompt_js_1 = require("../../../model-function/generate-text/validateChatPrompt.cjs");
+exports.mapChatPromptToOpenAIChatFormat = exports.mapVisionInstructionPromptToOpenAIChatFormat = exports.mapInstructionPromptToOpenAIChatFormat = void 0;
+const validateChatPrompt_js_1 = require("../../../model-function/generate-text/prompt-format/validateChatPrompt.cjs");
 /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
@@ -31,6 +31,29 @@ function mapInstructionPromptToOpenAIChatFormat() {
     };
 }
 exports.mapInstructionPromptToOpenAIChatFormat = mapInstructionPromptToOpenAIChatFormat;
+/**
+ * Formats a version prompt as an OpenAI chat prompt.
+ */
+function mapVisionInstructionPromptToOpenAIChatFormat() {
+    return {
+        format: ({ instruction, image, mimeType }) => {
+            return [
+                {
+                    role: "user",
+                    content: [
+                        { type: "text", text: instruction },
+                        {
+                            type: "image_url",
+                            image_url: `data:${mimeType ?? "image/jpeg"};base64,${image}`,
+                        },
+                    ],
+                },
+            ];
+        },
+        stopSequences: [],
+    };
+}
+exports.mapVisionInstructionPromptToOpenAIChatFormat = mapVisionInstructionPromptToOpenAIChatFormat;
 /**
  * Formats a chat prompt as an OpenAI chat prompt.
  */
package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts CHANGED
@@ -1,11 +1,16 @@
-import { ChatPrompt } from "../../../model-function/generate-text/ChatPrompt.js";
-import { InstructionPrompt } from "../../../model-function/generate-text/InstructionPrompt.js";
+import { ChatPrompt } from "../../../model-function/generate-text/prompt-format/ChatPrompt.js";
+import { InstructionPrompt } from "../../../model-function/generate-text/prompt-format/InstructionPrompt.js";
 import { TextGenerationPromptFormat } from "../../../model-function/generate-text/TextGenerationPromptFormat.js";
 import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
+import { VisionInstructionPrompt } from "../../../model-function/generate-text/prompt-format/VisionInstructionPrompt.js";
 /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
 export declare function mapInstructionPromptToOpenAIChatFormat(): TextGenerationPromptFormat<InstructionPrompt, Array<OpenAIChatMessage>>;
+/**
+ * Formats a version prompt as an OpenAI chat prompt.
+ */
+export declare function mapVisionInstructionPromptToOpenAIChatFormat(): TextGenerationPromptFormat<VisionInstructionPrompt, Array<OpenAIChatMessage>>;
 /**
  * Formats a chat prompt as an OpenAI chat prompt.
  */
package/model-provider/openai/chat/OpenAIChatPromptFormat.js CHANGED
@@ -1,4 +1,4 @@
-import { validateChatPrompt } from "../../../model-function/generate-text/validateChatPrompt.js";
+import { validateChatPrompt } from "../../../model-function/generate-text/prompt-format/validateChatPrompt.js";
 /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
@@ -27,6 +27,28 @@ export function mapInstructionPromptToOpenAIChatFormat() {
         stopSequences: [],
     };
 }
+/**
+ * Formats a version prompt as an OpenAI chat prompt.
+ */
+export function mapVisionInstructionPromptToOpenAIChatFormat() {
+    return {
+        format: ({ instruction, image, mimeType }) => {
+            return [
+                {
+                    role: "user",
+                    content: [
+                        { type: "text", text: instruction },
+                        {
+                            type: "image_url",
+                            image_url: `data:${mimeType ?? "image/jpeg"};base64,${image}`,
+                        },
+                    ],
+                },
+            ];
+        },
+        stopSequences: [],
+    };
+}
 /**
  * Formats a chat prompt as an OpenAI chat prompt.
  */
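On the OpenAI side, the same prompt shape is wired up through mapVisionInstructionPromptToOpenAIChatFormat(), which builds a single user message with a text part and a base64 data-URL image part. A minimal usage sketch, assuming a vision-capable chat model id (the model name and base64Image are assumptions):

```ts
import { OpenAIChatModel, generateText } from "modelfusion";

const model = new OpenAIChatModel({
  model: "gpt-4-vision-preview", // assumed vision-capable model id
  maxCompletionTokens: 256,
}).withVisionInstructionPrompt(); // new in 0.64.0

const description = await generateText(model, {
  instruction: "Describe the image in detail:",
  image: base64Image, // assumed base64-encoded image string, defined elsewhere
  mimeType: "image/png", // optional; defaults to "image/jpeg"
});
```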
package/model-provider/openai/chat/countOpenAIChatMessageTokens.cjs CHANGED
@@ -15,10 +15,26 @@ exports.OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT = 2;
  */
 exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT = 5;
 async function countOpenAIChatMessageTokens({ message, model, }) {
-    const contentTokenCount = await (0, countTokens_js_1.countTokens)(new TikTokenTokenizer_js_1.TikTokenTokenizer({
+    const tokenizer = new TikTokenTokenizer_js_1.TikTokenTokenizer({
         model: (0, OpenAIChatModel_js_1.getOpenAIChatModelInformation)(model).baseModel,
-    }), message.content ?? "");
-    return exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT + contentTokenCount;
+    });
+    // case: function call without content
+    if (message.content == null) {
+        return exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT;
+    }
+    // case: simple text content
+    if (typeof message.content === "string") {
+        return (exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT +
+            (await (0, countTokens_js_1.countTokens)(tokenizer, message.content)));
+    }
+    // case: array of content objects
+    let contentTokenCount = exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT;
+    for (const content of message.content) {
+        if (content.type === "text") {
+            contentTokenCount += await (0, countTokens_js_1.countTokens)(tokenizer, content.text);
+        }
+    }
+    return contentTokenCount;
 }
 exports.countOpenAIChatMessageTokens = countOpenAIChatMessageTokens;
 async function countOpenAIChatPromptTokens({ messages, model, }) {
package/model-provider/openai/chat/countOpenAIChatMessageTokens.js CHANGED
@@ -12,10 +12,26 @@ export const OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT = 2;
  */
 export const OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT = 5;
 export async function countOpenAIChatMessageTokens({ message, model, }) {
-    const contentTokenCount = await countTokens(new TikTokenTokenizer({
+    const tokenizer = new TikTokenTokenizer({
         model: getOpenAIChatModelInformation(model).baseModel,
-    }), message.content ?? "");
-    return OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT + contentTokenCount;
+    });
+    // case: function call without content
+    if (message.content == null) {
+        return OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT;
+    }
+    // case: simple text content
+    if (typeof message.content === "string") {
+        return (OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT +
+            (await countTokens(tokenizer, message.content)));
+    }
+    // case: array of content objects
+    let contentTokenCount = OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT;
+    for (const content of message.content) {
+        if (content.type === "text") {
+            contentTokenCount += await countTokens(tokenizer, content.text);
+        }
+    }
+    return contentTokenCount;
 }
 export async function countOpenAIChatPromptTokens({ messages, model, }) {
     let tokens = OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT;
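The token counter now branches on the message shape instead of assuming string content: null content (e.g. a pure function call) costs only the per-message base count, string content adds its token count, and for multi-part content only the text parts are tokenized, so image parts are excluded from the estimate. A small sketch of the third case (whether countOpenAIChatMessageTokens is re-exported from the package root is an assumption):

```ts
const count = await countOpenAIChatMessageTokens({
  message: {
    role: "user",
    content: [
      { type: "text", text: "What is in this picture?" },
      { type: "image_url", image_url: "data:image/jpeg;base64,..." },
    ],
  },
  model: "gpt-4-vision-preview", // assumed vision-capable model id
});
// count === 5 (OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT)
//          + tokens("What is in this picture?"); the image part is skipped.
```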
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
-  "version": "0.63.0",
+  "version": "0.64.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [