modelfusion 0.62.0 → 0.64.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/model-function/generate-text/index.cjs +9 -8
  2. package/model-function/generate-text/index.d.ts +9 -8
  3. package/model-function/generate-text/index.js +9 -8
  4. package/model-function/generate-text/{AlpacaPromptFormat.d.ts → prompt-format/AlpacaPromptFormat.d.ts} +1 -1
  5. package/model-function/generate-text/{Llama2PromptFormat.d.ts → prompt-format/Llama2PromptFormat.d.ts} +1 -1
  6. package/model-function/generate-text/{TextPromptFormat.d.ts → prompt-format/TextPromptFormat.d.ts} +1 -1
  7. package/model-function/generate-text/{VicunaPromptFormat.d.ts → prompt-format/VicunaPromptFormat.d.ts} +1 -1
  8. package/model-function/generate-text/prompt-format/VisionInstructionPrompt.cjs +2 -0
  9. package/model-function/generate-text/prompt-format/VisionInstructionPrompt.d.ts +31 -0
  10. package/model-function/generate-text/prompt-format/VisionInstructionPrompt.js +1 -0
  11. package/model-function/generate-text/{trimChatPrompt.d.ts → prompt-format/trimChatPrompt.d.ts} +1 -1
  12. package/model-provider/anthropic/AnthropicPromptFormat.cjs +1 -1
  13. package/model-provider/anthropic/AnthropicPromptFormat.d.ts +2 -2
  14. package/model-provider/anthropic/AnthropicPromptFormat.js +1 -1
  15. package/model-provider/cohere/CohereTextGenerationModel.cjs +1 -1
  16. package/model-provider/cohere/CohereTextGenerationModel.js +1 -1
  17. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +4 -0
  18. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +1 -0
  19. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +4 -0
  20. package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.cjs +15 -0
  21. package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.d.ts +4 -0
  22. package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.js +11 -0
  23. package/model-provider/openai/OpenAICompletionModel.cjs +1 -1
  24. package/model-provider/openai/OpenAICompletionModel.js +1 -1
  25. package/model-provider/openai/OpenAICostCalculator.cjs +6 -2
  26. package/model-provider/openai/OpenAICostCalculator.js +6 -2
  27. package/model-provider/openai/OpenAIImageGenerationModel.cjs +2 -4
  28. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +2 -1
  29. package/model-provider/openai/OpenAIImageGenerationModel.js +2 -4
  30. package/model-provider/openai/chat/OpenAIChatMessage.d.ts +25 -2
  31. package/model-provider/openai/chat/OpenAIChatModel.cjs +15 -2
  32. package/model-provider/openai/chat/OpenAIChatModel.d.ts +23 -10
  33. package/model-provider/openai/chat/OpenAIChatModel.js +16 -3
  34. package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs +25 -2
  35. package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +7 -2
  36. package/model-provider/openai/chat/OpenAIChatPromptFormat.js +23 -1
  37. package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +10 -1
  38. package/model-provider/openai/chat/OpenAIChatStreamIterable.js +10 -1
  39. package/model-provider/openai/chat/countOpenAIChatMessageTokens.cjs +19 -3
  40. package/model-provider/openai/chat/countOpenAIChatMessageTokens.js +19 -3
  41. package/package.json +1 -1
  42. /package/model-function/generate-text/{AlpacaPromptFormat.cjs → prompt-format/AlpacaPromptFormat.cjs} +0 -0
  43. /package/model-function/generate-text/{AlpacaPromptFormat.js → prompt-format/AlpacaPromptFormat.js} +0 -0
  44. /package/model-function/generate-text/{ChatPrompt.cjs → prompt-format/ChatPrompt.cjs} +0 -0
  45. /package/model-function/generate-text/{ChatPrompt.d.ts → prompt-format/ChatPrompt.d.ts} +0 -0
  46. /package/model-function/generate-text/{ChatPrompt.js → prompt-format/ChatPrompt.js} +0 -0
  47. /package/model-function/generate-text/{InstructionPrompt.cjs → prompt-format/InstructionPrompt.cjs} +0 -0
  48. /package/model-function/generate-text/{InstructionPrompt.d.ts → prompt-format/InstructionPrompt.d.ts} +0 -0
  49. /package/model-function/generate-text/{InstructionPrompt.js → prompt-format/InstructionPrompt.js} +0 -0
  50. /package/model-function/generate-text/{Llama2PromptFormat.cjs → prompt-format/Llama2PromptFormat.cjs} +0 -0
  51. /package/model-function/generate-text/{Llama2PromptFormat.js → prompt-format/Llama2PromptFormat.js} +0 -0
  52. /package/model-function/generate-text/{TextPromptFormat.cjs → prompt-format/TextPromptFormat.cjs} +0 -0
  53. /package/model-function/generate-text/{TextPromptFormat.js → prompt-format/TextPromptFormat.js} +0 -0
  54. /package/model-function/generate-text/{VicunaPromptFormat.cjs → prompt-format/VicunaPromptFormat.cjs} +0 -0
  55. /package/model-function/generate-text/{VicunaPromptFormat.js → prompt-format/VicunaPromptFormat.js} +0 -0
  56. /package/model-function/generate-text/{trimChatPrompt.cjs → prompt-format/trimChatPrompt.cjs} +0 -0
  57. /package/model-function/generate-text/{trimChatPrompt.js → prompt-format/trimChatPrompt.js} +0 -0
  58. /package/model-function/generate-text/{validateChatPrompt.cjs → prompt-format/validateChatPrompt.cjs} +0 -0
  59. /package/model-function/generate-text/{validateChatPrompt.d.ts → prompt-format/validateChatPrompt.d.ts} +0 -0
  60. /package/model-function/generate-text/{validateChatPrompt.js → prompt-format/validateChatPrompt.js} +0 -0
package/model-function/generate-text/index.cjs CHANGED
@@ -14,18 +14,19 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- __exportStar(require("./AlpacaPromptFormat.cjs"), exports);
- __exportStar(require("./ChatPrompt.cjs"), exports);
- __exportStar(require("./InstructionPrompt.cjs"), exports);
- __exportStar(require("./Llama2PromptFormat.cjs"), exports);
  __exportStar(require("./PromptFormatTextGenerationModel.cjs"), exports);
  __exportStar(require("./PromptFormatTextStreamingModel.cjs"), exports);
  __exportStar(require("./TextGenerationEvent.cjs"), exports);
  __exportStar(require("./TextGenerationModel.cjs"), exports);
  __exportStar(require("./TextGenerationPromptFormat.cjs"), exports);
- __exportStar(require("./TextPromptFormat.cjs"), exports);
- __exportStar(require("./VicunaPromptFormat.cjs"), exports);
  __exportStar(require("./generateText.cjs"), exports);
+ __exportStar(require("./prompt-format/AlpacaPromptFormat.cjs"), exports);
+ __exportStar(require("./prompt-format/ChatPrompt.cjs"), exports);
+ __exportStar(require("./prompt-format/InstructionPrompt.cjs"), exports);
+ __exportStar(require("./prompt-format/Llama2PromptFormat.cjs"), exports);
+ __exportStar(require("./prompt-format/TextPromptFormat.cjs"), exports);
+ __exportStar(require("./prompt-format/VicunaPromptFormat.cjs"), exports);
+ __exportStar(require("./prompt-format/VisionInstructionPrompt.cjs"), exports);
+ __exportStar(require("./prompt-format/trimChatPrompt.cjs"), exports);
+ __exportStar(require("./prompt-format/validateChatPrompt.cjs"), exports);
  __exportStar(require("./streamText.cjs"), exports);
- __exportStar(require("./trimChatPrompt.cjs"), exports);
- __exportStar(require("./validateChatPrompt.cjs"), exports);
package/model-function/generate-text/index.d.ts CHANGED
@@ -1,15 +1,16 @@
- export * from "./AlpacaPromptFormat.js";
- export * from "./ChatPrompt.js";
- export * from "./InstructionPrompt.js";
- export * from "./Llama2PromptFormat.js";
  export * from "./PromptFormatTextGenerationModel.js";
  export * from "./PromptFormatTextStreamingModel.js";
  export * from "./TextGenerationEvent.js";
  export * from "./TextGenerationModel.js";
  export * from "./TextGenerationPromptFormat.js";
- export * from "./TextPromptFormat.js";
- export * from "./VicunaPromptFormat.js";
  export * from "./generateText.js";
+ export * from "./prompt-format/AlpacaPromptFormat.js";
+ export * from "./prompt-format/ChatPrompt.js";
+ export * from "./prompt-format/InstructionPrompt.js";
+ export * from "./prompt-format/Llama2PromptFormat.js";
+ export * from "./prompt-format/TextPromptFormat.js";
+ export * from "./prompt-format/VicunaPromptFormat.js";
+ export * from "./prompt-format/VisionInstructionPrompt.js";
+ export * from "./prompt-format/trimChatPrompt.js";
+ export * from "./prompt-format/validateChatPrompt.js";
  export * from "./streamText.js";
- export * from "./trimChatPrompt.js";
- export * from "./validateChatPrompt.js";
package/model-function/generate-text/index.js CHANGED
@@ -1,15 +1,16 @@
- export * from "./AlpacaPromptFormat.js";
- export * from "./ChatPrompt.js";
- export * from "./InstructionPrompt.js";
- export * from "./Llama2PromptFormat.js";
  export * from "./PromptFormatTextGenerationModel.js";
  export * from "./PromptFormatTextStreamingModel.js";
  export * from "./TextGenerationEvent.js";
  export * from "./TextGenerationModel.js";
  export * from "./TextGenerationPromptFormat.js";
- export * from "./TextPromptFormat.js";
- export * from "./VicunaPromptFormat.js";
  export * from "./generateText.js";
+ export * from "./prompt-format/AlpacaPromptFormat.js";
+ export * from "./prompt-format/ChatPrompt.js";
+ export * from "./prompt-format/InstructionPrompt.js";
+ export * from "./prompt-format/Llama2PromptFormat.js";
+ export * from "./prompt-format/TextPromptFormat.js";
+ export * from "./prompt-format/VicunaPromptFormat.js";
+ export * from "./prompt-format/VisionInstructionPrompt.js";
+ export * from "./prompt-format/trimChatPrompt.js";
+ export * from "./prompt-format/validateChatPrompt.js";
  export * from "./streamText.js";
- export * from "./trimChatPrompt.js";
- export * from "./validateChatPrompt.js";
package/model-function/generate-text/{AlpacaPromptFormat.d.ts → prompt-format/AlpacaPromptFormat.d.ts} RENAMED
@@ -1,5 +1,5 @@
  import { InstructionPrompt } from "./InstructionPrompt.js";
- import { TextGenerationPromptFormat } from "./TextGenerationPromptFormat.js";
+ import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
  /**
  * Formats an instruction prompt as an Alpaca prompt.
  *
package/model-function/generate-text/{Llama2PromptFormat.d.ts → prompt-format/Llama2PromptFormat.d.ts} RENAMED
@@ -1,6 +1,6 @@
  import { ChatPrompt } from "./ChatPrompt.js";
  import { InstructionPrompt } from "./InstructionPrompt.js";
- import { TextGenerationPromptFormat } from "./TextGenerationPromptFormat.js";
+ import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
  /**
  * Formats an instruction prompt as a Llama 2 prompt.
  *
package/model-function/generate-text/{TextPromptFormat.d.ts → prompt-format/TextPromptFormat.d.ts} RENAMED
@@ -1,6 +1,6 @@
  import { ChatPrompt } from "./ChatPrompt.js";
  import { InstructionPrompt } from "./InstructionPrompt.js";
- import { TextGenerationPromptFormat } from "./TextGenerationPromptFormat.js";
+ import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
  /**
  * Formats an instruction prompt as a basic text prompt.
  */
package/model-function/generate-text/{VicunaPromptFormat.d.ts → prompt-format/VicunaPromptFormat.d.ts} RENAMED
@@ -1,5 +1,5 @@
  import { ChatPrompt } from "./ChatPrompt.js";
- import { TextGenerationPromptFormat } from "./TextGenerationPromptFormat.js";
+ import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
  /**
  * Formats a chat prompt as a Vicuna prompt.
  *
package/model-function/generate-text/prompt-format/VisionInstructionPrompt.cjs ADDED
@@ -0,0 +1,2 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
package/model-function/generate-text/prompt-format/VisionInstructionPrompt.d.ts ADDED
@@ -0,0 +1,31 @@
+ /**
+ * A single vision instruction prompt. It contains an instruction, a base64-encoded image,
+ * and an optional mime type of the image.
+ *
+ * If no mime type is provided, the mime type defaults to "image/jpeg".
+ *
+ * @example
+ * ```ts
+ * {
+ *   instruction: "Describe the image in detail:",
+ *   image: fs.readFileSync(path.join("data", "example-image.png"), {
+ *     encoding: "base64",
+ *   }),
+ *   mimeType: "image/png"
+ * }
+ * ```
+ */
+ export type VisionInstructionPrompt = {
+     /**
+      * The instruction for the model.
+      */
+     instruction: string;
+     /**
+      * Base64-encoded image.
+      */
+     image: string;
+     /**
+      * Optional mime type of the image.
+      */
+     mimeType?: string;
+ };
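For reference, a value of the new type can be built directly from a file on disk. A minimal sketch based on the doc comment's own example (file path and instruction text are illustrative):

```ts
import fs from "node:fs";
import path from "node:path";
import type { VisionInstructionPrompt } from "modelfusion";

// Illustrative values; any object matching the type works.
const prompt: VisionInstructionPrompt = {
  instruction: "Describe the image in detail:",
  image: fs.readFileSync(path.join("data", "example-image.png"), {
    encoding: "base64",
  }),
  mimeType: "image/png", // optional; "image/jpeg" is the default
};
```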
package/model-function/generate-text/{trimChatPrompt.d.ts → prompt-format/trimChatPrompt.d.ts} RENAMED
@@ -1,5 +1,5 @@
  import { ChatPrompt } from "./ChatPrompt.js";
- import { HasContextWindowSize, HasTokenizer, TextGenerationModel, TextGenerationModelSettings } from "./TextGenerationModel.js";
+ import { HasContextWindowSize, HasTokenizer, TextGenerationModel, TextGenerationModelSettings } from "../TextGenerationModel.js";
  /**
  * Keeps only the most recent messages in the prompt, while leaving enough space for the completion.
  *
package/model-provider/anthropic/AnthropicPromptFormat.cjs CHANGED
@@ -1,7 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.mapChatPromptToAnthropicFormat = exports.mapInstructionPromptToAnthropicFormat = void 0;
- const validateChatPrompt_js_1 = require("../../model-function/generate-text/validateChatPrompt.cjs");
+ const validateChatPrompt_js_1 = require("../../model-function/generate-text/prompt-format/validateChatPrompt.cjs");
  /**
  * Formats an instruction prompt as an Anthropic prompt.
  */
package/model-provider/anthropic/AnthropicPromptFormat.d.ts CHANGED
@@ -1,5 +1,5 @@
- import { ChatPrompt } from "../../model-function/generate-text/ChatPrompt.js";
- import { InstructionPrompt } from "../../model-function/generate-text/InstructionPrompt.js";
+ import { ChatPrompt } from "../../model-function/generate-text/prompt-format/ChatPrompt.js";
+ import { InstructionPrompt } from "../../model-function/generate-text/prompt-format/InstructionPrompt.js";
  import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
  /**
  * Formats an instruction prompt as an Anthropic prompt.
package/model-provider/anthropic/AnthropicPromptFormat.js CHANGED
@@ -1,4 +1,4 @@
- import { validateChatPrompt } from "../../model-function/generate-text/validateChatPrompt.js";
+ import { validateChatPrompt } from "../../model-function/generate-text/prompt-format/validateChatPrompt.js";
  /**
  * Formats an instruction prompt as an Anthropic prompt.
  */
package/model-provider/cohere/CohereTextGenerationModel.cjs CHANGED
@@ -6,7 +6,7 @@ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndTh
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
  const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
- const TextPromptFormat_js_1 = require("../../model-function/generate-text/TextPromptFormat.cjs");
+ const TextPromptFormat_js_1 = require("../../model-function/generate-text/prompt-format/TextPromptFormat.cjs");
  const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
  const parseJsonStream_js_1 = require("../../util/streaming/parseJsonStream.cjs");
package/model-provider/cohere/CohereTextGenerationModel.js CHANGED
@@ -3,7 +3,7 @@ import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottl
  import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
- import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/TextPromptFormat.js";
+ import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
  import { parseJsonStream } from "../../util/streaming/parseJsonStream.js";
package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs CHANGED
@@ -12,6 +12,7 @@ const parseJSON_js_1 = require("../../util/parseJSON.cjs");
  const LlamaCppApiConfiguration_js_1 = require("./LlamaCppApiConfiguration.cjs");
  const LlamaCppError_js_1 = require("./LlamaCppError.cjs");
  const LlamaCppTokenizer_js_1 = require("./LlamaCppTokenizer.cjs");
+ const mapVisionInstructionPromptToLlamaCppFormat_js_1 = require("./mapVisionInstructionPromptToLlamaCppFormat.cjs");
  class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  constructor(settings = {}) {
  super({ settings });
@@ -107,6 +108,9 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  stopSequences: [],
  });
  }
+ withVisionInstructionPrompt() {
+ return this.withPromptFormat((0, mapVisionInstructionPromptToLlamaCppFormat_js_1.mapVisionInstructionPromptToLlamaCppFormat)());
+ }
  withPromptFormat(promptFormat) {
  return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
  model: this.withSettings({
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts CHANGED
@@ -111,6 +111,7 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
  }>;
  doStreamText(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  withTextPrompt(): PromptFormatTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+ withVisionInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").VisionInstructionPrompt, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
  withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, LlamaCppTextGenerationPrompt>): PromptFormatTextStreamingModel<INPUT_PROMPT, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
  withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
  }
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js CHANGED
@@ -9,6 +9,7 @@ import { parseJsonWithZod } from "../../util/parseJSON.js";
  import { LlamaCppApiConfiguration } from "./LlamaCppApiConfiguration.js";
  import { failedLlamaCppCallResponseHandler } from "./LlamaCppError.js";
  import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
+ import { mapVisionInstructionPromptToLlamaCppFormat } from "./mapVisionInstructionPromptToLlamaCppFormat.js";
  export class LlamaCppTextGenerationModel extends AbstractModel {
  constructor(settings = {}) {
  super({ settings });
@@ -104,6 +105,9 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
  stopSequences: [],
  });
  }
+ withVisionInstructionPrompt() {
+ return this.withPromptFormat(mapVisionInstructionPromptToLlamaCppFormat());
+ }
  withPromptFormat(promptFormat) {
  return new PromptFormatTextStreamingModel({
  model: this.withSettings({
package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.cjs ADDED
@@ -0,0 +1,15 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.mapVisionInstructionPromptToLlamaCppFormat = void 0;
+ function mapVisionInstructionPromptToLlamaCppFormat() {
+     return {
+         format: ({ instruction, image }) => {
+             return {
+                 text: `[img-1]\n\n${instruction}`,
+                 images: { "1": image },
+             };
+         },
+         stopSequences: [],
+     };
+ }
+ exports.mapVisionInstructionPromptToLlamaCppFormat = mapVisionInstructionPromptToLlamaCppFormat;
package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.d.ts ADDED
@@ -0,0 +1,4 @@
+ import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
+ import { VisionInstructionPrompt } from "../../model-function/generate-text/prompt-format/VisionInstructionPrompt.js";
+ import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
+ export declare function mapVisionInstructionPromptToLlamaCppFormat(): TextGenerationPromptFormat<VisionInstructionPrompt, LlamaCppTextGenerationPrompt>;
package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.js ADDED
@@ -0,0 +1,11 @@
+ export function mapVisionInstructionPromptToLlamaCppFormat() {
+     return {
+         format: ({ instruction, image }) => {
+             return {
+                 text: `[img-1]\n\n${instruction}`,
+                 images: { "1": image },
+             };
+         },
+         stopSequences: [],
+     };
+ }
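Together, these additions let a llama.cpp server that was started with a multimodal model answer image questions. A minimal usage sketch, assuming the `generateText(model, prompt)` call style of this release (the image path is illustrative):

```ts
import fs from "node:fs";
import { LlamaCppTextGenerationModel, generateText } from "modelfusion";

// Illustrative image file; any base64-encoded image works.
const image = fs.readFileSync("data/example-image.png", { encoding: "base64" });

const model = new LlamaCppTextGenerationModel({
  maxCompletionTokens: 1024,
}).withVisionInstructionPrompt();

// The format maps this prompt to { text: `[img-1]\n\n${instruction}`, images: { "1": image } }.
const text = await generateText(model, {
  instruction: "Describe the image in detail:",
  image,
});
```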
package/model-provider/openai/OpenAICompletionModel.cjs CHANGED
@@ -6,7 +6,7 @@ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndTh
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
  const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
- const TextPromptFormat_js_1 = require("../../model-function/generate-text/TextPromptFormat.cjs");
+ const TextPromptFormat_js_1 = require("../../model-function/generate-text/prompt-format/TextPromptFormat.cjs");
  const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
  const parseJSON_js_1 = require("../../util/parseJSON.cjs");
package/model-provider/openai/OpenAICompletionModel.js CHANGED
@@ -3,7 +3,7 @@ import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottl
  import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
- import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/TextPromptFormat.js";
+ import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
  import { parseJsonWithZod } from "../../util/parseJSON.js";
package/model-provider/openai/OpenAICostCalculator.cjs CHANGED
@@ -1,12 +1,12 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.OpenAICostCalculator = void 0;
+ const OpenAICompletionModel_js_1 = require("./OpenAICompletionModel.cjs");
  const OpenAIImageGenerationModel_js_1 = require("./OpenAIImageGenerationModel.cjs");
+ const OpenAISpeechModel_js_1 = require("./OpenAISpeechModel.cjs");
  const OpenAITextEmbeddingModel_js_1 = require("./OpenAITextEmbeddingModel.cjs");
- const OpenAICompletionModel_js_1 = require("./OpenAICompletionModel.cjs");
  const OpenAITranscriptionModel_js_1 = require("./OpenAITranscriptionModel.cjs");
  const OpenAIChatModel_js_1 = require("./chat/OpenAIChatModel.cjs");
- const OpenAISpeechModel_js_1 = require("./OpenAISpeechModel.cjs");
  class OpenAICostCalculator {
  constructor() {
  Object.defineProperty(this, "provider", {
@@ -21,7 +21,11 @@ class OpenAICostCalculator {
  const model = call.model.modelName;
  switch (type) {
  case "generate-image": {
+ if (model == null) {
+ return null;
+ }
  return (0, OpenAIImageGenerationModel_js_1.calculateOpenAIImageGenerationCostInMillicents)({
+ model: model,
  settings: call.settings,
  });
  }
package/model-provider/openai/OpenAICostCalculator.js CHANGED
@@ -1,9 +1,9 @@
+ import { calculateOpenAICompletionCostInMillicents, isOpenAICompletionModel, } from "./OpenAICompletionModel.js";
  import { calculateOpenAIImageGenerationCostInMillicents, } from "./OpenAIImageGenerationModel.js";
+ import { calculateOpenAISpeechCostInMillicents, } from "./OpenAISpeechModel.js";
  import { calculateOpenAIEmbeddingCostInMillicents, isOpenAIEmbeddingModel, } from "./OpenAITextEmbeddingModel.js";
- import { calculateOpenAICompletionCostInMillicents, isOpenAICompletionModel, } from "./OpenAICompletionModel.js";
  import { calculateOpenAITranscriptionCostInMillicents, } from "./OpenAITranscriptionModel.js";
  import { calculateOpenAIChatCostInMillicents, isOpenAIChatModel, } from "./chat/OpenAIChatModel.js";
- import { calculateOpenAISpeechCostInMillicents, } from "./OpenAISpeechModel.js";
  export class OpenAICostCalculator {
  constructor() {
  Object.defineProperty(this, "provider", {
@@ -18,7 +18,11 @@ export class OpenAICostCalculator {
  const model = call.model.modelName;
  switch (type) {
  case "generate-image": {
+ if (model == null) {
+ return null;
+ }
  return calculateOpenAIImageGenerationCostInMillicents({
+ model: model,
  settings: call.settings,
  });
  }
package/model-provider/openai/OpenAIImageGenerationModel.cjs CHANGED
@@ -55,9 +55,8 @@ exports.OPENAI_IMAGE_MODELS = {
  /**
  * @see https://openai.com/pricing
  */
- const calculateOpenAIImageGenerationCostInMillicents = ({ settings, }) => {
- console.log(settings);
- const cost = exports.OPENAI_IMAGE_MODELS[settings.model]?.getCost(settings);
+ const calculateOpenAIImageGenerationCostInMillicents = ({ model, settings, }) => {
+ const cost = exports.OPENAI_IMAGE_MODELS[model]?.getCost(settings);
  if (cost == null) {
  return null;
  }
@@ -106,7 +105,6 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
  }
  get settingsForEvent() {
  const eventSettingProperties = [
- "model",
  "n",
  "size",
  "quality",
package/model-provider/openai/OpenAIImageGenerationModel.d.ts CHANGED
@@ -17,7 +17,8 @@ export declare const OPENAI_IMAGE_MODELS: {
  /**
  * @see https://openai.com/pricing
  */
- export declare const calculateOpenAIImageGenerationCostInMillicents: ({ settings, }: {
+ export declare const calculateOpenAIImageGenerationCostInMillicents: ({ model, settings, }: {
+ model: OpenAIImageModelType;
  settings: OpenAIImageGenerationSettings;
  }) => number | null;
  export type OpenAIImageModelType = keyof typeof OPENAI_IMAGE_MODELS;
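The cost helper now receives the model name separately instead of reading it from the settings (and no longer logs the settings), and the calculator returns `null` when the model name is missing. A minimal sketch, assuming the helper is re-exported from the package index like the other cost functions and that `"dall-e-3"` is a key of `OPENAI_IMAGE_MODELS`:

```ts
import { calculateOpenAIImageGenerationCostInMillicents } from "modelfusion";

const costInMillicents = calculateOpenAIImageGenerationCostInMillicents({
  model: "dall-e-3", // assumed OpenAIImageModelType key
  settings: { size: "1024x1024" }, // assumed OpenAIImageGenerationSettings shape
});
// returns null when the model has no known pricing
```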
package/model-provider/openai/OpenAIImageGenerationModel.js CHANGED
@@ -52,9 +52,8 @@ export const OPENAI_IMAGE_MODELS = {
  /**
  * @see https://openai.com/pricing
  */
- export const calculateOpenAIImageGenerationCostInMillicents = ({ settings, }) => {
- console.log(settings);
- const cost = OPENAI_IMAGE_MODELS[settings.model]?.getCost(settings);
+ export const calculateOpenAIImageGenerationCostInMillicents = ({ model, settings, }) => {
+ const cost = OPENAI_IMAGE_MODELS[model]?.getCost(settings);
  if (cost == null) {
  return null;
  }
@@ -102,7 +101,6 @@ export class OpenAIImageGenerationModel extends AbstractModel {
  }
  get settingsForEvent() {
  const eventSettingProperties = [
- "model",
  "n",
  "size",
  "quality",
package/model-provider/openai/chat/OpenAIChatMessage.d.ts CHANGED
@@ -1,14 +1,37 @@
  export type OpenAIChatMessage = {
- role: "user" | "assistant" | "system";
+ role: "system";
  content: string;
  name?: string;
+ } | {
+ role: "user";
+ content: string | Array<{
+ type: "text";
+ text: string;
+ } | {
+ type: "image_url";
+ image_url: string;
+ }>;
+ name?: string;
  } | {
  role: "assistant";
  content: string | null;
- function_call: {
+ name?: string;
+ tool_calls?: Array<{
+ id: string;
+ type: "function";
+ function: {
+ name: string;
+ arguments: string;
+ };
+ }>;
+ function_call?: {
  name: string;
  arguments: string;
  };
+ } | {
+ role: "tool";
+ tool_call_id: string;
+ content: string | null;
  } | {
  role: "function";
  content: string;
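The widened `user` variant accepts mixed text and image parts. A minimal sketch of a vision message under the new type (the image file path is illustrative):

```ts
import fs from "node:fs";
import type { OpenAIChatMessage } from "modelfusion";

const base64Image = fs.readFileSync("data/example-image.png", {
  encoding: "base64",
});

const message: OpenAIChatMessage = {
  role: "user",
  content: [
    { type: "text", text: "What is in this image?" },
    {
      // data URL, as produced by the new vision prompt formats
      type: "image_url",
      image_url: `data:image/png;base64,${base64Image}`,
    },
  ],
};
```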
package/model-provider/openai/chat/OpenAIChatModel.cjs CHANGED
@@ -211,6 +211,7 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
  // map to OpenAI API names:
  stop: this.settings.stopSequences,
  maxTokens: this.settings.maxCompletionTokens,
+ openAIResponseFormat: this.settings.responseFormat,
  // other settings:
  user: this.settings.isUserIdForwardingEnabled
  ? options.run?.userId
@@ -363,6 +364,9 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
  withInstructionPrompt() {
  return this.withPromptFormat((0, OpenAIChatPromptFormat_js_1.mapInstructionPromptToOpenAIChatFormat)());
  }
+ withVisionInstructionPrompt() {
+ return this.withPromptFormat((0, OpenAIChatPromptFormat_js_1.mapVisionInstructionPromptToOpenAIChatFormat)());
+ }
  /**
  * Returns this model with a chat prompt format.
  */
@@ -390,6 +394,7 @@ const openAIChatResponseSchema = zod_1.z.object({
  object: zod_1.z.literal("chat.completion"),
  created: zod_1.z.number(),
  model: zod_1.z.string(),
+ system_fingerprint: zod_1.z.string(),
  choices: zod_1.z.array(zod_1.z.object({
  message: zod_1.z.object({
  role: zod_1.z.literal("assistant"),
@@ -403,7 +408,13 @@ const openAIChatResponseSchema = zod_1.z.object({
  }),
  index: zod_1.z.number(),
  logprobs: zod_1.z.nullable(zod_1.z.any()),
- finish_reason: zod_1.z.string(),
+ finish_reason: zod_1.z.enum([
+ "stop",
+ "length",
+ "tool_calls",
+ "content_filter",
+ "function_call",
+ ]),
  })),
  usage: zod_1.z.object({
  prompt_tokens: zod_1.z.number(),
@@ -411,7 +422,7 @@ const openAIChatResponseSchema = zod_1.z.object({
  total_tokens: zod_1.z.number(),
  }),
  });
- async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration(), abortSignal, responseFormat, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, logitBias, user, }) {
+ async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration(), abortSignal, responseFormat, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, logitBias, user, openAIResponseFormat, seed, }) {
  // empty arrays are not allowed for stop:
  if (stop != null && Array.isArray(stop) && stop.length === 0) {
  stop = undefined;
@@ -433,6 +444,8 @@ async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration_js
  presence_penalty: presencePenalty,
  frequency_penalty: frequencyPenalty,
  logit_bias: logitBias,
+ seed,
+ response_format: openAIResponseFormat,
  user,
  },
  failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
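The new `seed` and `responseFormat` settings shown above map to OpenAI's `seed` and `response_format` request fields. A minimal sketch enabling JSON mode with a fixed seed; the model name is an assumption:

```ts
import { OpenAIChatModel } from "modelfusion";

const model = new OpenAIChatModel({
  model: "gpt-4-1106-preview", // assumed model with JSON mode and seed support
  responseFormat: { type: "json_object" },
  seed: 42, // best-effort reproducible sampling
});
```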
package/model-provider/openai/chat/OpenAIChatModel.d.ts CHANGED
@@ -119,6 +119,10 @@ export interface OpenAIChatCallSettings {
  maxTokens?: number;
  temperature?: number;
  topP?: number;
+ seed?: number | null;
+ responseFormat?: {
+ type?: "text" | "json_object";
+ };
  n?: number;
  presencePenalty?: number;
  frequencyPenalty?: number;
@@ -189,11 +193,12 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
  arguments: string;
  } | undefined;
  };
- finish_reason: string;
+ finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
  index: number;
  logprobs?: any;
  }[];
  created: number;
+ system_fingerprint: string;
  };
  text: string;
  usage: {
@@ -229,11 +234,12 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
  arguments: string;
  } | undefined;
  };
- finish_reason: string;
+ finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
  index: number;
  logprobs?: any;
  }[];
  created: number;
+ system_fingerprint: string;
  };
  valueText: string;
  value: any;
@@ -263,11 +269,12 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
  arguments: string;
  } | undefined;
  };
- finish_reason: string;
+ finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
  index: number;
  logprobs?: any;
  }[];
  created: number;
+ system_fingerprint: string;
  };
  structureAndText: {
  structure: null;
@@ -299,11 +306,12 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
  arguments: string;
  } | undefined;
  };
- finish_reason: string;
+ finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
  index: number;
  logprobs?: any;
  }[];
  created: number;
+ system_fingerprint: string;
  };
  structureAndText: {
  structure: string;
@@ -326,6 +334,7 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
  * Returns this model with an instruction prompt format.
  */
  withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").InstructionPrompt, OpenAIChatMessage[], OpenAIChatSettings, this>;
+ withVisionInstructionPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").VisionInstructionPrompt, OpenAIChatMessage[], OpenAIChatSettings, this>;
  /**
  * Returns this model with a chat prompt format.
  */
@@ -338,6 +347,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
  object: z.ZodLiteral<"chat.completion">;
  created: z.ZodNumber;
  model: z.ZodString;
+ system_fingerprint: z.ZodString;
  choices: z.ZodArray<z.ZodObject<{
  message: z.ZodObject<{
  role: z.ZodLiteral<"assistant">;
@@ -369,7 +379,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
  }>;
  index: z.ZodNumber;
  logprobs: z.ZodNullable<z.ZodAny>;
- finish_reason: z.ZodString;
+ finish_reason: z.ZodEnum<["stop", "length", "tool_calls", "content_filter", "function_call"]>;
  }, "strip", z.ZodTypeAny, {
  message: {
  content: string | null;
@@ -379,7 +389,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
  arguments: string;
  } | undefined;
  };
- finish_reason: string;
+ finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
  index: number;
  logprobs?: any;
  }, {
@@ -391,7 +401,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
  arguments: string;
  } | undefined;
  };
- finish_reason: string;
+ finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
  index: number;
  logprobs?: any;
  }>, "many">;
@@ -426,11 +436,12 @@ declare const openAIChatResponseSchema: z.ZodObject<{
  arguments: string;
  } | undefined;
  };
- finish_reason: string;
+ finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
  index: number;
  logprobs?: any;
  }[];
  created: number;
+ system_fingerprint: string;
  }, {
  object: "chat.completion";
  usage: {
@@ -449,11 +460,12 @@ declare const openAIChatResponseSchema: z.ZodObject<{
  arguments: string;
  } | undefined;
  };
- finish_reason: string;
+ finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
  index: number;
  logprobs?: any;
  }[];
  created: number;
+ system_fingerprint: string;
  }>;
  export type OpenAIChatResponse = z.infer<typeof openAIChatResponseSchema>;
  export type OpenAIChatResponseFormatType<T> = {
@@ -484,11 +496,12 @@ export declare const OpenAIChatResponseFormat: {
  arguments: string;
  } | undefined;
  };
- finish_reason: string;
+ finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
  index: number;
  logprobs?: any;
  }[];
  created: number;
+ system_fingerprint: string;
  }>;
  };
  /**
package/model-provider/openai/chat/OpenAIChatModel.js CHANGED
@@ -9,7 +9,7 @@ import { PromptFormatTextStreamingModel } from "../../../model-function/generate
  import { OpenAIApiConfiguration } from "../OpenAIApiConfiguration.js";
  import { failedOpenAICallResponseHandler } from "../OpenAIError.js";
  import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
- import { mapChatPromptToOpenAIChatFormat, mapInstructionPromptToOpenAIChatFormat, } from "./OpenAIChatPromptFormat.js";
+ import { mapChatPromptToOpenAIChatFormat, mapInstructionPromptToOpenAIChatFormat, mapVisionInstructionPromptToOpenAIChatFormat, } from "./OpenAIChatPromptFormat.js";
  import { createOpenAIChatDeltaIterableQueue } from "./OpenAIChatStreamIterable.js";
  import { countOpenAIChatPromptTokens } from "./countOpenAIChatMessageTokens.js";
  /*
@@ -202,6 +202,7 @@ export class OpenAIChatModel extends AbstractModel {
  // map to OpenAI API names:
  stop: this.settings.stopSequences,
  maxTokens: this.settings.maxCompletionTokens,
+ openAIResponseFormat: this.settings.responseFormat,
  // other settings:
  user: this.settings.isUserIdForwardingEnabled
  ? options.run?.userId
@@ -354,6 +355,9 @@ export class OpenAIChatModel extends AbstractModel {
  withInstructionPrompt() {
  return this.withPromptFormat(mapInstructionPromptToOpenAIChatFormat());
  }
+ withVisionInstructionPrompt() {
+ return this.withPromptFormat(mapVisionInstructionPromptToOpenAIChatFormat());
+ }
  /**
  * Returns this model with a chat prompt format.
  */
@@ -380,6 +384,7 @@ const openAIChatResponseSchema = z.object({
  object: z.literal("chat.completion"),
  created: z.number(),
  model: z.string(),
+ system_fingerprint: z.string(),
  choices: z.array(z.object({
  message: z.object({
  role: z.literal("assistant"),
@@ -393,7 +398,13 @@ const openAIChatResponseSchema = z.object({
  }),
  index: z.number(),
  logprobs: z.nullable(z.any()),
- finish_reason: z.string(),
+ finish_reason: z.enum([
+ "stop",
+ "length",
+ "tool_calls",
+ "content_filter",
+ "function_call",
+ ]),
  })),
  usage: z.object({
  prompt_tokens: z.number(),
@@ -401,7 +412,7 @@ const openAIChatResponseSchema = z.object({
  total_tokens: z.number(),
  }),
  });
- async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration(), abortSignal, responseFormat, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, logitBias, user, }) {
+ async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration(), abortSignal, responseFormat, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, logitBias, user, openAIResponseFormat, seed, }) {
  // empty arrays are not allowed for stop:
  if (stop != null && Array.isArray(stop) && stop.length === 0) {
  stop = undefined;
@@ -423,6 +434,8 @@ async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration(),
  presence_penalty: presencePenalty,
  frequency_penalty: frequencyPenalty,
  logit_bias: logitBias,
+ seed,
+ response_format: openAIResponseFormat,
  user,
  },
  failedResponseHandler: failedOpenAICallResponseHandler,
package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs CHANGED
@@ -1,7 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.mapChatPromptToOpenAIChatFormat = exports.mapInstructionPromptToOpenAIChatFormat = void 0;
- const validateChatPrompt_js_1 = require("../../../model-function/generate-text/validateChatPrompt.cjs");
+ exports.mapChatPromptToOpenAIChatFormat = exports.mapVisionInstructionPromptToOpenAIChatFormat = exports.mapInstructionPromptToOpenAIChatFormat = void 0;
+ const validateChatPrompt_js_1 = require("../../../model-function/generate-text/prompt-format/validateChatPrompt.cjs");
  /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
@@ -31,6 +31,29 @@ function mapInstructionPromptToOpenAIChatFormat() {
  };
  }
  exports.mapInstructionPromptToOpenAIChatFormat = mapInstructionPromptToOpenAIChatFormat;
+ /**
+ * Formats a vision instruction prompt as an OpenAI chat prompt.
+ */
+ function mapVisionInstructionPromptToOpenAIChatFormat() {
+     return {
+         format: ({ instruction, image, mimeType }) => {
+             return [
+                 {
+                     role: "user",
+                     content: [
+                         { type: "text", text: instruction },
+                         {
+                             type: "image_url",
+                             image_url: `data:${mimeType ?? "image/jpeg"};base64,${image}`,
+                         },
+                     ],
+                 },
+             ];
+         },
+         stopSequences: [],
+     };
+ }
+ exports.mapVisionInstructionPromptToOpenAIChatFormat = mapVisionInstructionPromptToOpenAIChatFormat;
  /**
  * Formats a chat prompt as an OpenAI chat prompt.
  */
package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts CHANGED
@@ -1,11 +1,16 @@
- import { ChatPrompt } from "../../../model-function/generate-text/ChatPrompt.js";
- import { InstructionPrompt } from "../../../model-function/generate-text/InstructionPrompt.js";
+ import { ChatPrompt } from "../../../model-function/generate-text/prompt-format/ChatPrompt.js";
+ import { InstructionPrompt } from "../../../model-function/generate-text/prompt-format/InstructionPrompt.js";
  import { TextGenerationPromptFormat } from "../../../model-function/generate-text/TextGenerationPromptFormat.js";
  import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
+ import { VisionInstructionPrompt } from "../../../model-function/generate-text/prompt-format/VisionInstructionPrompt.js";
  /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
  export declare function mapInstructionPromptToOpenAIChatFormat(): TextGenerationPromptFormat<InstructionPrompt, Array<OpenAIChatMessage>>;
+ /**
+ * Formats a vision instruction prompt as an OpenAI chat prompt.
+ */
+ export declare function mapVisionInstructionPromptToOpenAIChatFormat(): TextGenerationPromptFormat<VisionInstructionPrompt, Array<OpenAIChatMessage>>;
  /**
  * Formats a chat prompt as an OpenAI chat prompt.
  */
package/model-provider/openai/chat/OpenAIChatPromptFormat.js CHANGED
@@ -1,4 +1,4 @@
- import { validateChatPrompt } from "../../../model-function/generate-text/validateChatPrompt.js";
+ import { validateChatPrompt } from "../../../model-function/generate-text/prompt-format/validateChatPrompt.js";
  /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
@@ -27,6 +27,28 @@ export function mapInstructionPromptToOpenAIChatFormat() {
  stopSequences: [],
  };
  }
+ /**
+ * Formats a vision instruction prompt as an OpenAI chat prompt.
+ */
+ export function mapVisionInstructionPromptToOpenAIChatFormat() {
+     return {
+         format: ({ instruction, image, mimeType }) => {
+             return [
+                 {
+                     role: "user",
+                     content: [
+                         { type: "text", text: instruction },
+                         {
+                             type: "image_url",
+                             image_url: `data:${mimeType ?? "image/jpeg"};base64,${image}`,
+                         },
+                     ],
+                 },
+             ];
+         },
+         stopSequences: [],
+     };
+ }
  /**
  * Formats a chat prompt as an OpenAI chat prompt.
  */
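With this prompt format wired into `OpenAIChatModel.withVisionInstructionPrompt()`, image instructions can be sent to a vision-capable chat model. A minimal usage sketch, assuming the `generateText(model, prompt)` call style of this release and a vision-capable model name (both assumptions; the image path is illustrative):

```ts
import fs from "node:fs";
import { OpenAIChatModel, generateText } from "modelfusion";

const image = fs.readFileSync("data/example-image.png", { encoding: "base64" });

const text = await generateText(
  new OpenAIChatModel({
    model: "gpt-4-vision-preview", // assumed vision-capable model name
    maxCompletionTokens: 1000,
  }).withVisionInstructionPrompt(),
  {
    instruction: "Describe the image in detail:",
    image,
    mimeType: "image/png",
  }
);
```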
package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs CHANGED
@@ -17,7 +17,16 @@ const chatResponseStreamEventSchema = zod_1.z.object({
  })
  .optional(),
  }),
- finish_reason: zod_1.z.enum(["stop", "length"]).nullable().optional(),
+ finish_reason: zod_1.z
+ .enum([
+ "stop",
+ "length",
+ "tool_calls",
+ "content_filter",
+ "function_call",
+ ])
+ .nullable()
+ .optional(),
  index: zod_1.z.number(),
  })),
  created: zod_1.z.number(),
package/model-provider/openai/chat/OpenAIChatStreamIterable.js CHANGED
@@ -14,7 +14,16 @@ const chatResponseStreamEventSchema = z.object({
  })
  .optional(),
  }),
- finish_reason: z.enum(["stop", "length"]).nullable().optional(),
+ finish_reason: z
+ .enum([
+ "stop",
+ "length",
+ "tool_calls",
+ "content_filter",
+ "function_call",
+ ])
+ .nullable()
+ .optional(),
  index: z.number(),
  })),
  created: z.number(),
package/model-provider/openai/chat/countOpenAIChatMessageTokens.cjs CHANGED
@@ -15,10 +15,26 @@ exports.OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT = 2;
  */
  exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT = 5;
  async function countOpenAIChatMessageTokens({ message, model, }) {
- const contentTokenCount = await (0, countTokens_js_1.countTokens)(new TikTokenTokenizer_js_1.TikTokenTokenizer({
+ const tokenizer = new TikTokenTokenizer_js_1.TikTokenTokenizer({
  model: (0, OpenAIChatModel_js_1.getOpenAIChatModelInformation)(model).baseModel,
- }), message.content ?? "");
- return exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT + contentTokenCount;
+ });
+ // case: function call without content
+ if (message.content == null) {
+ return exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT;
+ }
+ // case: simple text content
+ if (typeof message.content === "string") {
+ return (exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT +
+ (await (0, countTokens_js_1.countTokens)(tokenizer, message.content)));
+ }
+ // case: array of content objects
+ let contentTokenCount = exports.OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT;
+ for (const content of message.content) {
+ if (content.type === "text") {
+ contentTokenCount += await (0, countTokens_js_1.countTokens)(tokenizer, content.text);
+ }
+ }
+ return contentTokenCount;
  }
  exports.countOpenAIChatMessageTokens = countOpenAIChatMessageTokens;
  async function countOpenAIChatPromptTokens({ messages, model, }) {
package/model-provider/openai/chat/countOpenAIChatMessageTokens.js CHANGED
@@ -12,10 +12,26 @@ export const OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT = 2;
  */
  export const OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT = 5;
  export async function countOpenAIChatMessageTokens({ message, model, }) {
- const contentTokenCount = await countTokens(new TikTokenTokenizer({
+ const tokenizer = new TikTokenTokenizer({
  model: getOpenAIChatModelInformation(model).baseModel,
- }), message.content ?? "");
- return OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT + contentTokenCount;
+ });
+ // case: function call without content
+ if (message.content == null) {
+ return OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT;
+ }
+ // case: simple text content
+ if (typeof message.content === "string") {
+ return (OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT +
+ (await countTokens(tokenizer, message.content)));
+ }
+ // case: array of content objects
+ let contentTokenCount = OPENAI_CHAT_MESSAGE_BASE_TOKEN_COUNT;
+ for (const content of message.content) {
+ if (content.type === "text") {
+ contentTokenCount += await countTokens(tokenizer, content.text);
+ }
+ }
+ return contentTokenCount;
  }
  export async function countOpenAIChatPromptTokens({ messages, model, }) {
  let tokens = OPENAI_CHAT_PROMPT_BASE_TOKEN_COUNT;
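The token counter now handles all three content shapes: `null` content (function calls) costs only the per-message base tokens, string content is tokenized directly, and for content arrays only the text parts are tokenized, so image parts contribute no tokens in this approximation. A minimal sketch, assuming `countOpenAIChatPromptTokens` is re-exported from the package index:

```ts
import { countOpenAIChatPromptTokens } from "modelfusion";

const tokenCount = await countOpenAIChatPromptTokens({
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "What is in this image?" },
        // image parts are not counted by this approximation:
        { type: "image_url", image_url: "data:image/png;base64,..." },
      ],
    },
  ],
  model: "gpt-4", // any supported chat model name
});
```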
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "modelfusion",
  "description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
- "version": "0.62.0",
+ "version": "0.64.0",
  "author": "Lars Grammel",
  "license": "MIT",
  "keywords": [