modelfusion 0.69.0 → 0.71.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. package/README.md +26 -13
  2. package/model-function/ModelCallEvent.d.ts +3 -2
  3. package/model-function/generate-text/index.cjs +1 -8
  4. package/model-function/generate-text/index.d.ts +1 -8
  5. package/model-function/generate-text/index.js +1 -8
  6. package/model-function/generate-text/prompt-format/AlpacaPromptFormat.cjs +31 -3
  7. package/model-function/generate-text/prompt-format/AlpacaPromptFormat.d.ts +29 -1
  8. package/model-function/generate-text/prompt-format/AlpacaPromptFormat.js +29 -1
  9. package/model-function/generate-text/prompt-format/ChatMLPromptFormat.cjs +79 -0
  10. package/model-function/generate-text/prompt-format/ChatMLPromptFormat.d.ts +31 -0
  11. package/model-function/generate-text/prompt-format/ChatMLPromptFormat.js +74 -0
  12. package/model-function/generate-text/prompt-format/ChatPrompt.d.ts +28 -23
  13. package/model-function/generate-text/prompt-format/ChatPromptValidationError.cjs +17 -0
  14. package/model-function/generate-text/prompt-format/ChatPromptValidationError.d.ts +8 -0
  15. package/model-function/generate-text/prompt-format/ChatPromptValidationError.js +13 -0
  16. package/model-function/generate-text/prompt-format/Llama2PromptFormat.cjs +41 -27
  17. package/model-function/generate-text/prompt-format/Llama2PromptFormat.d.ts +20 -2
  18. package/model-function/generate-text/prompt-format/Llama2PromptFormat.js +38 -24
  19. package/model-function/generate-text/prompt-format/TextPromptFormat.cjs +27 -30
  20. package/model-function/generate-text/prompt-format/TextPromptFormat.d.ts +7 -5
  21. package/model-function/generate-text/prompt-format/TextPromptFormat.js +24 -27
  22. package/model-function/generate-text/prompt-format/VicunaPromptFormat.cjs +21 -29
  23. package/model-function/generate-text/prompt-format/VicunaPromptFormat.d.ts +2 -2
  24. package/model-function/generate-text/prompt-format/VicunaPromptFormat.js +19 -27
  25. package/model-function/generate-text/prompt-format/index.cjs +39 -0
  26. package/model-function/generate-text/prompt-format/index.d.ts +10 -0
  27. package/model-function/generate-text/prompt-format/index.js +10 -0
  28. package/model-function/generate-text/prompt-format/trimChatPrompt.cjs +17 -22
  29. package/model-function/generate-text/prompt-format/trimChatPrompt.js +17 -22
  30. package/model-function/generate-text/prompt-format/validateChatPrompt.cjs +12 -24
  31. package/model-function/generate-text/prompt-format/validateChatPrompt.d.ts +0 -3
  32. package/model-function/generate-text/prompt-format/validateChatPrompt.js +10 -21
  33. package/model-function/generate-tool-call/NoSuchToolDefinitionError.cjs +41 -0
  34. package/model-function/generate-tool-call/NoSuchToolDefinitionError.d.ts +17 -0
  35. package/model-function/generate-tool-call/NoSuchToolDefinitionError.js +37 -0
  36. package/model-function/generate-tool-call/ToolCall.d.ts +5 -0
  37. package/model-function/generate-tool-call/ToolCallGenerationModel.d.ts +3 -3
  38. package/model-function/generate-tool-call/ToolCallParametersValidationError.cjs +1 -1
  39. package/model-function/generate-tool-call/ToolCallParametersValidationError.js +1 -1
  40. package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.cjs +2 -0
  41. package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.d.ts +23 -0
  42. package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.js +1 -0
  43. package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.cjs +2 -0
  44. package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.d.ts +21 -0
  45. package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.js +1 -0
  46. package/model-function/generate-tool-call/ToolDefinition.cjs +2 -0
  47. package/model-function/generate-tool-call/{ToolCallDefinition.d.ts → ToolDefinition.d.ts} +1 -1
  48. package/model-function/generate-tool-call/ToolDefinition.js +1 -0
  49. package/model-function/generate-tool-call/generateToolCall.cjs +2 -1
  50. package/model-function/generate-tool-call/generateToolCall.d.ts +6 -11
  51. package/model-function/generate-tool-call/generateToolCall.js +2 -1
  52. package/model-function/generate-tool-call/generateToolCallsOrText.cjs +63 -0
  53. package/model-function/generate-tool-call/generateToolCallsOrText.d.ts +33 -0
  54. package/model-function/generate-tool-call/generateToolCallsOrText.js +59 -0
  55. package/model-function/generate-tool-call/index.cjs +7 -2
  56. package/model-function/generate-tool-call/index.d.ts +7 -2
  57. package/model-function/generate-tool-call/index.js +7 -2
  58. package/model-provider/anthropic/AnthropicPromptFormat.cjs +22 -26
  59. package/model-provider/anthropic/AnthropicPromptFormat.d.ts +4 -2
  60. package/model-provider/anthropic/AnthropicPromptFormat.js +19 -23
  61. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +2 -2
  62. package/model-provider/anthropic/AnthropicTextGenerationModel.js +3 -3
  63. package/model-provider/anthropic/index.cjs +14 -2
  64. package/model-provider/anthropic/index.d.ts +1 -1
  65. package/model-provider/anthropic/index.js +1 -1
  66. package/model-provider/cohere/CohereTextGenerationModel.cjs +3 -3
  67. package/model-provider/cohere/CohereTextGenerationModel.d.ts +1 -1
  68. package/model-provider/cohere/CohereTextGenerationModel.js +4 -4
  69. package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs → LlamaCppBakLLaVA1Format.cjs} +4 -4
  70. package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.d.ts → LlamaCppBakLLaVA1Format.d.ts} +2 -2
  71. package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js → LlamaCppBakLLaVA1Format.js} +2 -2
  72. package/model-provider/llamacpp/index.cjs +14 -2
  73. package/model-provider/llamacpp/index.d.ts +1 -1
  74. package/model-provider/llamacpp/index.js +1 -1
  75. package/model-provider/openai/OpenAICompletionModel.cjs +4 -4
  76. package/model-provider/openai/OpenAICompletionModel.d.ts +1 -1
  77. package/model-provider/openai/OpenAICompletionModel.js +5 -5
  78. package/model-provider/openai/chat/OpenAIChatMessage.d.ts +4 -1
  79. package/model-provider/openai/chat/OpenAIChatModel.cjs +29 -3
  80. package/model-provider/openai/chat/OpenAIChatModel.d.ts +63 -16
  81. package/model-provider/openai/chat/OpenAIChatModel.js +30 -4
  82. package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs +22 -34
  83. package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +2 -2
  84. package/model-provider/openai/chat/OpenAIChatPromptFormat.js +19 -31
  85. package/model-provider/openai/index.cjs +14 -2
  86. package/model-provider/openai/index.d.ts +1 -1
  87. package/model-provider/openai/index.js +1 -1
  88. package/package.json +2 -2
  89. package/tool/Tool.cjs +1 -1
  90. package/tool/Tool.d.ts +1 -1
  91. package/tool/Tool.js +1 -1
  92. /package/model-function/generate-tool-call/{ToolCallDefinition.cjs → ToolCall.cjs} +0 -0
  93. /package/model-function/generate-tool-call/{ToolCallDefinition.js → ToolCall.js} +0 -0
@@ -0,0 +1,59 @@
1
+ import { executeStandardCall } from "../executeStandardCall.js";
2
+ import { NoSuchToolDefinitionError } from "./NoSuchToolDefinitionError.js";
3
+ import { ToolCallParametersValidationError } from "./ToolCallParametersValidationError.js";
4
+ export async function generateToolCallsOrText(model, tools, prompt, options) {
5
+ // Note: if `prompt` is a function, it is called with the tools to build the actual prompt; the expanded prompt itself must not be a function.
6
+ const expandedPrompt = typeof prompt === "function"
7
+ ? prompt(tools)
8
+ : prompt;
9
+ const fullResponse = await executeStandardCall({
10
+ functionType: "generate-tool-calls-or-text",
11
+ input: expandedPrompt,
12
+ model,
13
+ options,
14
+ generateResponse: async (options) => {
15
+ const result = await model.doGenerateToolCallsOrText(tools, expandedPrompt, options);
16
+ const { text, toolCalls: rawToolCalls } = result;
17
+ // no tool calls:
18
+ if (rawToolCalls == null) {
19
+ return {
20
+ response: result.response,
21
+ extractedValue: { text, toolCalls: null },
22
+ usage: result.usage,
23
+ };
24
+ }
25
+ // map tool calls:
26
+ const toolCalls = rawToolCalls.map((rawToolCall) => {
27
+ const tool = tools.find((tool) => tool.name === rawToolCall.name);
28
+ if (tool == undefined) {
29
+ throw new NoSuchToolDefinitionError({
30
+ toolName: rawToolCall.name,
31
+ parameters: rawToolCall.parameters,
32
+ });
33
+ }
34
+ const parseResult = tool.parameters.validate(rawToolCall.parameters);
35
+ if (!parseResult.success) {
36
+ throw new ToolCallParametersValidationError({
37
+ toolName: tool.name,
38
+ parameters: rawToolCall.parameters,
39
+ cause: parseResult.error,
40
+ });
41
+ }
42
+ return {
43
+ id: rawToolCall.id,
44
+ name: tool.name,
45
+ parameters: parseResult.data,
46
+ };
47
+ });
48
+ return {
49
+ response: result.response,
50
+ extractedValue: {
51
+ text,
52
+ toolCalls: toolCalls,
53
+ },
54
+ usage: result.usage,
55
+ };
56
+ },
57
+ });
58
+ return options?.returnType === "full" ? fullResponse : fullResponse.value;
59
+ }
@@ -14,8 +14,13 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
14
14
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
15
15
  };
16
16
  Object.defineProperty(exports, "__esModule", { value: true });
17
- __exportStar(require("./ToolCallDefinition.cjs"), exports);
18
- __exportStar(require("./ToolCallParametersValidationError.cjs"), exports);
17
+ __exportStar(require("./ToolCall.cjs"), exports);
18
+ __exportStar(require("./ToolCallGenerationError.cjs"), exports);
19
19
  __exportStar(require("./ToolCallGenerationEvent.cjs"), exports);
20
20
  __exportStar(require("./ToolCallGenerationModel.cjs"), exports);
21
+ __exportStar(require("./ToolCallParametersValidationError.cjs"), exports);
22
+ __exportStar(require("./ToolCallsOrTextGenerationEvent.cjs"), exports);
23
+ __exportStar(require("./ToolCallsOrTextGenerationModel.cjs"), exports);
24
+ __exportStar(require("./ToolDefinition.cjs"), exports);
21
25
  __exportStar(require("./generateToolCall.cjs"), exports);
26
+ __exportStar(require("./generateToolCallsOrText.cjs"), exports);
@@ -1,5 +1,10 @@
1
- export * from "./ToolCallDefinition.js";
2
- export * from "./ToolCallParametersValidationError.js";
1
+ export * from "./ToolCall.js";
2
+ export * from "./ToolCallGenerationError.js";
3
3
  export * from "./ToolCallGenerationEvent.js";
4
4
  export * from "./ToolCallGenerationModel.js";
5
+ export * from "./ToolCallParametersValidationError.js";
6
+ export * from "./ToolCallsOrTextGenerationEvent.js";
7
+ export * from "./ToolCallsOrTextGenerationModel.js";
8
+ export * from "./ToolDefinition.js";
5
9
  export * from "./generateToolCall.js";
10
+ export * from "./generateToolCallsOrText.js";
@@ -1,5 +1,10 @@
1
- export * from "./ToolCallDefinition.js";
2
- export * from "./ToolCallParametersValidationError.js";
1
+ export * from "./ToolCall.js";
2
+ export * from "./ToolCallGenerationError.js";
3
3
  export * from "./ToolCallGenerationEvent.js";
4
4
  export * from "./ToolCallGenerationModel.js";
5
+ export * from "./ToolCallParametersValidationError.js";
6
+ export * from "./ToolCallsOrTextGenerationEvent.js";
7
+ export * from "./ToolCallsOrTextGenerationModel.js";
8
+ export * from "./ToolDefinition.js";
5
9
  export * from "./generateToolCall.js";
10
+ export * from "./generateToolCallsOrText.js";
@@ -1,11 +1,11 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.mapChatPromptToAnthropicFormat = exports.mapInstructionPromptToAnthropicFormat = void 0;
3
+ exports.chat = exports.instruction = void 0;
4
4
  const validateChatPrompt_js_1 = require("../../model-function/generate-text/prompt-format/validateChatPrompt.cjs");
5
5
  /**
6
6
  * Formats an instruction prompt as an Anthropic prompt.
7
7
  */
8
- function mapInstructionPromptToAnthropicFormat() {
8
+ function instruction() {
9
9
  return {
10
10
  format: (instruction) => {
11
11
  let text = "";
@@ -25,36 +25,32 @@ function mapInstructionPromptToAnthropicFormat() {
25
25
  stopSequences: [],
26
26
  };
27
27
  }
28
- exports.mapInstructionPromptToAnthropicFormat = mapInstructionPromptToAnthropicFormat;
28
+ exports.instruction = instruction;
29
29
  /**
30
30
  * Formats a chat prompt as an Anthropic prompt.
31
+ *
32
+ * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
31
33
  */
32
- function mapChatPromptToAnthropicFormat() {
34
+ function chat() {
33
35
  return {
34
36
  format: (chatPrompt) => {
35
37
  (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
36
- let text = "";
37
- for (let i = 0; i < chatPrompt.length; i++) {
38
- const message = chatPrompt[i];
39
- // system message:
40
- if (i === 0 &&
41
- "system" in message &&
42
- typeof message.system === "string") {
43
- text += `${message.system}\n\n`;
44
- continue;
45
- }
46
- // user message
47
- if ("user" in message) {
48
- text += `\n\nHuman:${message.user}`;
49
- continue;
50
- }
51
- // ai message:
52
- if ("ai" in message) {
53
- text += `\n\nAssistant:${message.ai}`;
54
- continue;
38
+ let text = chatPrompt.system != null ? `${chatPrompt.system}\n\n` : "";
39
+ for (const { role, content } of chatPrompt.messages) {
40
+ switch (role) {
41
+ case "user": {
42
+ text += `\n\nHuman:${content}`;
43
+ break;
44
+ }
45
+ case "assistant": {
46
+ text += `\n\nAssistant:${content}`;
47
+ break;
48
+ }
49
+ default: {
50
+ const _exhaustiveCheck = role;
51
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
52
+ }
55
53
  }
56
- // unsupported message:
57
- throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
58
54
  }
59
55
  // AI message prefix:
60
56
  text += `\n\nAssistant:`;
@@ -63,4 +59,4 @@ function mapChatPromptToAnthropicFormat() {
63
59
  stopSequences: [],
64
60
  };
65
61
  }
66
- exports.mapChatPromptToAnthropicFormat = mapChatPromptToAnthropicFormat;
62
+ exports.chat = chat;
@@ -4,8 +4,10 @@ import { TextGenerationPromptFormat } from "../../model-function/generate-text/T
4
4
  /**
5
5
  * Formats an instruction prompt as an Anthropic prompt.
6
6
  */
7
- export declare function mapInstructionPromptToAnthropicFormat(): TextGenerationPromptFormat<InstructionPrompt, string>;
7
+ export declare function instruction(): TextGenerationPromptFormat<InstructionPrompt, string>;
8
8
  /**
9
9
  * Formats a chat prompt as an Anthropic prompt.
10
+ *
11
+ * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
10
12
  */
11
- export declare function mapChatPromptToAnthropicFormat(): TextGenerationPromptFormat<ChatPrompt, string>;
13
+ export declare function chat(): TextGenerationPromptFormat<ChatPrompt, string>;
@@ -2,7 +2,7 @@ import { validateChatPrompt } from "../../model-function/generate-text/prompt-fo
2
2
  /**
3
3
  * Formats an instruction prompt as an Anthropic prompt.
4
4
  */
5
- export function mapInstructionPromptToAnthropicFormat() {
5
+ export function instruction() {
6
6
  return {
7
7
  format: (instruction) => {
8
8
  let text = "";
@@ -24,33 +24,29 @@ export function mapInstructionPromptToAnthropicFormat() {
24
24
  }
25
25
  /**
26
26
  * Formats a chat prompt as an Anthropic prompt.
27
+ *
28
+ * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
27
29
  */
28
- export function mapChatPromptToAnthropicFormat() {
30
+ export function chat() {
29
31
  return {
30
32
  format: (chatPrompt) => {
31
33
  validateChatPrompt(chatPrompt);
32
- let text = "";
33
- for (let i = 0; i < chatPrompt.length; i++) {
34
- const message = chatPrompt[i];
35
- // system message:
36
- if (i === 0 &&
37
- "system" in message &&
38
- typeof message.system === "string") {
39
- text += `${message.system}\n\n`;
40
- continue;
41
- }
42
- // user message
43
- if ("user" in message) {
44
- text += `\n\nHuman:${message.user}`;
45
- continue;
46
- }
47
- // ai message:
48
- if ("ai" in message) {
49
- text += `\n\nAssistant:${message.ai}`;
50
- continue;
34
+ let text = chatPrompt.system != null ? `${chatPrompt.system}\n\n` : "";
35
+ for (const { role, content } of chatPrompt.messages) {
36
+ switch (role) {
37
+ case "user": {
38
+ text += `\n\nHuman:${content}`;
39
+ break;
40
+ }
41
+ case "assistant": {
42
+ text += `\n\nAssistant:${content}`;
43
+ break;
44
+ }
45
+ default: {
46
+ const _exhaustiveCheck = role;
47
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
48
+ }
51
49
  }
52
- // unsupported message:
53
- throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
54
50
  }
55
51
  // AI message prefix:
56
52
  text += `\n\nAssistant:`;
@@ -110,13 +110,13 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
110
110
  * Returns this model with an instruction prompt format.
111
111
  */
112
112
  withInstructionPrompt() {
113
- return this.withPromptFormat((0, AnthropicPromptFormat_js_1.mapInstructionPromptToAnthropicFormat)());
113
+ return this.withPromptFormat((0, AnthropicPromptFormat_js_1.instruction)());
114
114
  }
115
115
  /**
116
116
  * Returns this model with a chat prompt format.
117
117
  */
118
118
  withChatPrompt() {
119
- return this.withPromptFormat((0, AnthropicPromptFormat_js_1.mapChatPromptToAnthropicFormat)());
119
+ return this.withPromptFormat((0, AnthropicPromptFormat_js_1.chat)());
120
120
  }
121
121
  withPromptFormat(promptFormat) {
122
122
  return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
@@ -9,7 +9,7 @@ import { ZodSchema } from "../../core/schema/ZodSchema.js";
9
9
  import { parseJSON } from "../../core/schema/parseJSON.js";
10
10
  import { AnthropicApiConfiguration } from "./AnthropicApiConfiguration.js";
11
11
  import { failedAnthropicCallResponseHandler } from "./AnthropicError.js";
12
- import { mapChatPromptToAnthropicFormat, mapInstructionPromptToAnthropicFormat, } from "./AnthropicPromptFormat.js";
12
+ import { instruction, chat } from "./AnthropicPromptFormat.js";
13
13
  export const ANTHROPIC_TEXT_GENERATION_MODELS = {
14
14
  "claude-instant-1": {
15
15
  contextWindowSize: 100000,
@@ -107,13 +107,13 @@ export class AnthropicTextGenerationModel extends AbstractModel {
107
107
  * Returns this model with an instruction prompt format.
108
108
  */
109
109
  withInstructionPrompt() {
110
- return this.withPromptFormat(mapInstructionPromptToAnthropicFormat());
110
+ return this.withPromptFormat(instruction());
111
111
  }
112
112
  /**
113
113
  * Returns this model with a chat prompt format.
114
114
  */
115
115
  withChatPrompt() {
116
- return this.withPromptFormat(mapChatPromptToAnthropicFormat());
116
+ return this.withPromptFormat(chat());
117
117
  }
118
118
  withPromptFormat(promptFormat) {
119
119
  return new PromptFormatTextStreamingModel({
@@ -10,14 +10,26 @@ var __createBinding = (this && this.__createBinding) || (Object.create ? (functi
10
10
  if (k2 === undefined) k2 = k;
11
11
  o[k2] = m[k];
12
12
  }));
13
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
14
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
15
+ }) : function(o, v) {
16
+ o["default"] = v;
17
+ });
13
18
  var __exportStar = (this && this.__exportStar) || function(m, exports) {
14
19
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
15
20
  };
21
+ var __importStar = (this && this.__importStar) || function (mod) {
22
+ if (mod && mod.__esModule) return mod;
23
+ var result = {};
24
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
25
+ __setModuleDefault(result, mod);
26
+ return result;
27
+ };
16
28
  Object.defineProperty(exports, "__esModule", { value: true });
17
- exports.anthropicErrorDataSchema = exports.AnthropicError = void 0;
29
+ exports.AnthropicPromptFormat = exports.anthropicErrorDataSchema = exports.AnthropicError = void 0;
18
30
  __exportStar(require("./AnthropicApiConfiguration.cjs"), exports);
19
31
  var AnthropicError_js_1 = require("./AnthropicError.cjs");
20
32
  Object.defineProperty(exports, "AnthropicError", { enumerable: true, get: function () { return AnthropicError_js_1.AnthropicError; } });
21
33
  Object.defineProperty(exports, "anthropicErrorDataSchema", { enumerable: true, get: function () { return AnthropicError_js_1.anthropicErrorDataSchema; } });
22
- __exportStar(require("./AnthropicPromptFormat.cjs"), exports);
34
+ exports.AnthropicPromptFormat = __importStar(require("./AnthropicPromptFormat.cjs"));
23
35
  __exportStar(require("./AnthropicTextGenerationModel.cjs"), exports);
@@ -1,4 +1,4 @@
1
1
  export * from "./AnthropicApiConfiguration.js";
2
2
  export { AnthropicError, anthropicErrorDataSchema } from "./AnthropicError.js";
3
- export * from "./AnthropicPromptFormat.js";
3
+ export * as AnthropicPromptFormat from "./AnthropicPromptFormat.js";
4
4
  export * from "./AnthropicTextGenerationModel.js";
@@ -1,4 +1,4 @@
1
1
  export * from "./AnthropicApiConfiguration.js";
2
2
  export { AnthropicError, anthropicErrorDataSchema } from "./AnthropicError.js";
3
- export * from "./AnthropicPromptFormat.js";
3
+ export * as AnthropicPromptFormat from "./AnthropicPromptFormat.js";
4
4
  export * from "./AnthropicTextGenerationModel.js";
@@ -4,6 +4,7 @@ exports.CohereTextGenerationResponseFormat = exports.CohereTextGenerationModel =
4
4
  const zod_1 = require("zod");
5
5
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
6
6
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
7
+ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
7
8
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
8
9
  const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
9
10
  const TextPromptFormat_js_1 = require("../../model-function/generate-text/prompt-format/TextPromptFormat.cjs");
@@ -13,7 +14,6 @@ const parseJsonStream_js_1 = require("../../util/streaming/parseJsonStream.cjs")
13
14
  const CohereApiConfiguration_js_1 = require("./CohereApiConfiguration.cjs");
14
15
  const CohereError_js_1 = require("./CohereError.cjs");
15
16
  const CohereTokenizer_js_1 = require("./CohereTokenizer.cjs");
16
- const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
17
17
  exports.COHERE_TEXT_GENERATION_MODELS = {
18
18
  command: {
19
19
  contextWindowSize: 2048,
@@ -137,13 +137,13 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
137
137
  * Returns this model with an instruction prompt format.
138
138
  */
139
139
  withInstructionPrompt() {
140
- return this.withPromptFormat((0, TextPromptFormat_js_1.mapInstructionPromptToTextFormat)());
140
+ return this.withPromptFormat((0, TextPromptFormat_js_1.instruction)());
141
141
  }
142
142
  /**
143
143
  * Returns this model with a chat prompt format.
144
144
  */
145
145
  withChatPrompt(options) {
146
- return this.withPromptFormat((0, TextPromptFormat_js_1.mapChatPromptToTextFormat)(options));
146
+ return this.withPromptFormat((0, TextPromptFormat_js_1.chat)(options));
147
147
  }
148
148
  withPromptFormat(promptFormat) {
149
149
  return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
@@ -93,7 +93,7 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
93
93
  */
94
94
  withChatPrompt(options?: {
95
95
  user?: string;
96
- ai?: string;
96
+ assistant?: string;
97
97
  }): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, CohereTextGenerationModelSettings, this>;
98
98
  withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, CohereTextGenerationModelSettings, this>;
99
99
  withSettings(additionalSettings: Partial<CohereTextGenerationModelSettings>): this;
@@ -1,16 +1,16 @@
1
1
  import { z } from "zod";
2
2
  import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
3
3
  import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
4
+ import { ZodSchema } from "../../core/schema/ZodSchema.js";
4
5
  import { AbstractModel } from "../../model-function/AbstractModel.js";
5
6
  import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
6
- import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
7
+ import { chat, instruction, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
7
8
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
8
9
  import { AsyncQueue } from "../../util/AsyncQueue.js";
9
10
  import { parseJsonStream } from "../../util/streaming/parseJsonStream.js";
10
11
  import { CohereApiConfiguration } from "./CohereApiConfiguration.js";
11
12
  import { failedCohereCallResponseHandler } from "./CohereError.js";
12
13
  import { CohereTokenizer } from "./CohereTokenizer.js";
13
- import { ZodSchema } from "../../core/schema/ZodSchema.js";
14
14
  export const COHERE_TEXT_GENERATION_MODELS = {
15
15
  command: {
16
16
  contextWindowSize: 2048,
@@ -134,13 +134,13 @@ export class CohereTextGenerationModel extends AbstractModel {
134
134
  * Returns this model with an instruction prompt format.
135
135
  */
136
136
  withInstructionPrompt() {
137
- return this.withPromptFormat(mapInstructionPromptToTextFormat());
137
+ return this.withPromptFormat(instruction());
138
138
  }
139
139
  /**
140
140
  * Returns this model with a chat prompt format.
141
141
  */
142
142
  withChatPrompt(options) {
143
- return this.withPromptFormat(mapChatPromptToTextFormat(options));
143
+ return this.withPromptFormat(chat(options));
144
144
  }
145
145
  withPromptFormat(promptFormat) {
146
146
  return new PromptFormatTextStreamingModel({
@@ -1,15 +1,15 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.mapInstructionPromptToBakLLaVA1ForLlamaCppFormat = void 0;
3
+ exports.instruction = void 0;
4
4
  // default Vicuna 1 system message
5
5
  const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
6
6
  "The assistant gives helpful, detailed, and polite answers to the user's questions.";
7
7
  /**
8
- * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompts.
8
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
9
9
  *
10
10
  * @see https://github.com/SkunkworksAI/BakLLaVA
11
11
  */
12
- function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat() {
12
+ function instruction() {
13
13
  return {
14
14
  format: (instruction) => {
15
15
  let text = "";
@@ -33,4 +33,4 @@ function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat() {
33
33
  stopSequences: [`\nUSER:`],
34
34
  };
35
35
  }
36
- exports.mapInstructionPromptToBakLLaVA1ForLlamaCppFormat = mapInstructionPromptToBakLLaVA1ForLlamaCppFormat;
36
+ exports.instruction = instruction;
@@ -2,8 +2,8 @@ import { InstructionPrompt } from "../../model-function/generate-text/prompt-for
2
2
  import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
3
3
  import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
4
4
  /**
5
- * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompts.
5
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
6
6
  *
7
7
  * @see https://github.com/SkunkworksAI/BakLLaVA
8
8
  */
9
- export declare function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat(): TextGenerationPromptFormat<InstructionPrompt, LlamaCppTextGenerationPrompt>;
9
+ export declare function instruction(): TextGenerationPromptFormat<InstructionPrompt, LlamaCppTextGenerationPrompt>;
@@ -2,11 +2,11 @@
2
2
  const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
3
3
  "The assistant gives helpful, detailed, and polite answers to the user's questions.";
4
4
  /**
5
- * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompts.
5
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
6
6
  *
7
7
  * @see https://github.com/SkunkworksAI/BakLLaVA
8
8
  */
9
- export function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat() {
9
+ export function instruction() {
10
10
  return {
11
11
  format: (instruction) => {
12
12
  let text = "";
@@ -10,15 +10,27 @@ var __createBinding = (this && this.__createBinding) || (Object.create ? (functi
10
10
  if (k2 === undefined) k2 = k;
11
11
  o[k2] = m[k];
12
12
  }));
13
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
14
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
15
+ }) : function(o, v) {
16
+ o["default"] = v;
17
+ });
13
18
  var __exportStar = (this && this.__exportStar) || function(m, exports) {
14
19
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
15
20
  };
21
+ var __importStar = (this && this.__importStar) || function (mod) {
22
+ if (mod && mod.__esModule) return mod;
23
+ var result = {};
24
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
25
+ __setModuleDefault(result, mod);
26
+ return result;
27
+ };
16
28
  Object.defineProperty(exports, "__esModule", { value: true });
17
- exports.LlamaCppError = void 0;
29
+ exports.LlamaCppError = exports.LlamaCppBakLLaVA1Format = void 0;
18
30
  __exportStar(require("./LlamaCppApiConfiguration.cjs"), exports);
31
+ exports.LlamaCppBakLLaVA1Format = __importStar(require("./LlamaCppBakLLaVA1Format.cjs"));
19
32
  var LlamaCppError_js_1 = require("./LlamaCppError.cjs");
20
33
  Object.defineProperty(exports, "LlamaCppError", { enumerable: true, get: function () { return LlamaCppError_js_1.LlamaCppError; } });
21
34
  __exportStar(require("./LlamaCppTextEmbeddingModel.cjs"), exports);
22
35
  __exportStar(require("./LlamaCppTextGenerationModel.cjs"), exports);
23
36
  __exportStar(require("./LlamaCppTokenizer.cjs"), exports);
24
- __exportStar(require("./mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs"), exports);
@@ -1,6 +1,6 @@
1
1
  export * from "./LlamaCppApiConfiguration.js";
2
+ export * as LlamaCppBakLLaVA1Format from "./LlamaCppBakLLaVA1Format.js";
2
3
  export { LlamaCppError, LlamaCppErrorData } from "./LlamaCppError.js";
3
4
  export * from "./LlamaCppTextEmbeddingModel.js";
4
5
  export * from "./LlamaCppTextGenerationModel.js";
5
6
  export * from "./LlamaCppTokenizer.js";
6
- export * from "./mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js";
@@ -1,6 +1,6 @@
1
1
  export * from "./LlamaCppApiConfiguration.js";
2
+ export * as LlamaCppBakLLaVA1Format from "./LlamaCppBakLLaVA1Format.js";
2
3
  export { LlamaCppError } from "./LlamaCppError.js";
3
4
  export * from "./LlamaCppTextEmbeddingModel.js";
4
5
  export * from "./LlamaCppTextGenerationModel.js";
5
6
  export * from "./LlamaCppTokenizer.js";
6
- export * from "./mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js";
@@ -4,17 +4,17 @@ exports.OpenAITextResponseFormat = exports.OpenAICompletionModel = exports.calcu
4
4
  const zod_1 = require("zod");
5
5
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
6
6
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
7
+ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
8
+ const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
7
9
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
8
10
  const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
9
11
  const TextPromptFormat_js_1 = require("../../model-function/generate-text/prompt-format/TextPromptFormat.cjs");
10
12
  const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
11
13
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
12
- const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
13
14
  const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
14
15
  const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
15
16
  const OpenAIError_js_1 = require("./OpenAIError.cjs");
16
17
  const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
17
- const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
18
18
  /**
19
19
  * @see https://platform.openai.com/docs/models/
20
20
  * @see https://openai.com/pricing
@@ -242,13 +242,13 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
242
242
  * Returns this model with an instruction prompt format.
243
243
  */
244
244
  withInstructionPrompt() {
245
- return this.withPromptFormat((0, TextPromptFormat_js_1.mapInstructionPromptToTextFormat)());
245
+ return this.withPromptFormat((0, TextPromptFormat_js_1.instruction)());
246
246
  }
247
247
  /**
248
248
  * Returns this model with a chat prompt format.
249
249
  */
250
250
  withChatPrompt(options) {
251
- return this.withPromptFormat((0, TextPromptFormat_js_1.mapChatPromptToTextFormat)(options));
251
+ return this.withPromptFormat((0, TextPromptFormat_js_1.chat)(options));
252
252
  }
253
253
  withPromptFormat(promptFormat) {
254
254
  return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
@@ -182,7 +182,7 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
182
182
  */
183
183
  withChatPrompt(options?: {
184
184
  user?: string;
185
- ai?: string;
185
+ assistant?: string;
186
186
  }): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, OpenAICompletionModelSettings, this>;
187
187
  withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
188
188
  withSettings(additionalSettings: Partial<OpenAICompletionModelSettings>): this;
@@ -1,17 +1,17 @@
1
1
  import { z } from "zod";
2
2
  import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
3
3
  import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
4
+ import { ZodSchema } from "../../core/schema/ZodSchema.js";
5
+ import { parseJSON } from "../../core/schema/parseJSON.js";
4
6
  import { AbstractModel } from "../../model-function/AbstractModel.js";
5
7
  import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
6
- import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
8
+ import { chat, instruction, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
7
9
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
8
10
  import { AsyncQueue } from "../../util/AsyncQueue.js";
9
- import { parseJSON } from "../../core/schema/parseJSON.js";
10
11
  import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
11
12
  import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
12
13
  import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
13
14
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
14
- import { ZodSchema } from "../../core/schema/ZodSchema.js";
15
15
  /**
16
16
  * @see https://platform.openai.com/docs/models/
17
17
  * @see https://openai.com/pricing
@@ -236,13 +236,13 @@ export class OpenAICompletionModel extends AbstractModel {
236
236
  * Returns this model with an instruction prompt format.
237
237
  */
238
238
  withInstructionPrompt() {
239
- return this.withPromptFormat(mapInstructionPromptToTextFormat());
239
+ return this.withPromptFormat(instruction());
240
240
  }
241
241
  /**
242
242
  * Returns this model with a chat prompt format.
243
243
  */
244
244
  withChatPrompt(options) {
245
- return this.withPromptFormat(mapChatPromptToTextFormat(options));
245
+ return this.withPromptFormat(chat(options));
246
246
  }
247
247
  withPromptFormat(promptFormat) {
248
248
  return new PromptFormatTextStreamingModel({
@@ -9,7 +9,10 @@ export type OpenAIChatMessage = {
9
9
  text: string;
10
10
  } | {
11
11
  type: "image_url";
12
- image_url: string;
12
+ image_url: string | {
13
+ url: string;
14
+ detail: "low" | "high" | "auto";
15
+ };
13
16
  }>;
14
17
  name?: string;
15
18
  } | {