modelfusion 0.102.0 → 0.104.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. package/CHANGELOG.md +27 -0
  2. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +1 -1
  3. package/model-function/generate-text/TextGenerationModel.cjs +7 -0
  4. package/model-function/generate-text/TextGenerationModel.d.ts +3 -1
  5. package/model-function/generate-text/TextGenerationModel.js +6 -1
  6. package/model-function/generate-text/TextGenerationResult.cjs +2 -0
  7. package/model-function/generate-text/TextGenerationResult.d.ts +11 -0
  8. package/model-function/generate-text/TextGenerationResult.js +1 -0
  9. package/model-function/generate-text/generateText.cjs +14 -9
  10. package/model-function/generate-text/generateText.d.ts +3 -0
  11. package/model-function/generate-text/generateText.js +14 -9
  12. package/model-function/generate-text/index.cjs +1 -0
  13. package/model-function/generate-text/index.d.ts +1 -0
  14. package/model-function/generate-text/index.js +1 -0
  15. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +2 -1
  16. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +2 -2
  17. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +2 -1
  18. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +5 -4
  19. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts +4 -4
  20. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +5 -4
  21. package/model-function/generate-text/prompt-template/ChatPrompt.cjs +0 -24
  22. package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +11 -34
  23. package/model-function/generate-text/prompt-template/ChatPrompt.js +1 -22
  24. package/model-function/generate-text/prompt-template/Content.cjs +9 -0
  25. package/model-function/generate-text/prompt-template/Content.d.ts +9 -4
  26. package/model-function/generate-text/prompt-template/Content.js +7 -1
  27. package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +6 -22
  28. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +36 -5
  29. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +16 -4
  30. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +34 -4
  31. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +5 -4
  32. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts +4 -4
  33. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +5 -4
  34. package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +3 -4
  35. package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts +4 -4
  36. package/model-function/generate-text/prompt-template/TextPromptTemplate.js +3 -4
  37. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +3 -3
  38. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +2 -2
  39. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +3 -3
  40. package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +0 -2
  41. package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts +4 -4
  42. package/model-function/generate-text/prompt-template/trimChatPrompt.js +0 -2
  43. package/model-provider/anthropic/AnthropicPromptTemplate.cjs +5 -4
  44. package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +4 -4
  45. package/model-provider/anthropic/AnthropicPromptTemplate.js +5 -4
  46. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +23 -8
  47. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +8 -3
  48. package/model-provider/anthropic/AnthropicTextGenerationModel.js +24 -9
  49. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
  50. package/model-provider/cohere/CohereTextGenerationModel.cjs +22 -6
  51. package/model-provider/cohere/CohereTextGenerationModel.d.ts +8 -3
  52. package/model-provider/cohere/CohereTextGenerationModel.js +22 -6
  53. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +2 -2
  54. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +2 -2
  55. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +9 -8
  56. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -5
  57. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +9 -8
  58. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +23 -16
  59. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -4
  60. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +23 -16
  61. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +51 -51
  62. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +14 -11
  63. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +51 -51
  64. package/model-provider/mistral/MistralChatModel.cjs +19 -2
  65. package/model-provider/mistral/MistralChatModel.d.ts +8 -3
  66. package/model-provider/mistral/MistralChatModel.js +19 -2
  67. package/model-provider/mistral/MistralPromptTemplate.cjs +5 -4
  68. package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
  69. package/model-provider/mistral/MistralPromptTemplate.js +5 -4
  70. package/model-provider/ollama/OllamaChatModel.cjs +8 -3
  71. package/model-provider/ollama/OllamaChatModel.d.ts +6 -3
  72. package/model-provider/ollama/OllamaChatModel.js +8 -3
  73. package/model-provider/ollama/OllamaChatPromptTemplate.cjs +9 -13
  74. package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +4 -4
  75. package/model-provider/ollama/OllamaChatPromptTemplate.js +9 -13
  76. package/model-provider/ollama/OllamaCompletionModel.cjs +8 -3
  77. package/model-provider/ollama/OllamaCompletionModel.d.ts +4 -1
  78. package/model-provider/ollama/OllamaCompletionModel.js +8 -3
  79. package/model-provider/openai/OpenAICompletionModel.cjs +20 -4
  80. package/model-provider/openai/OpenAICompletionModel.d.ts +8 -3
  81. package/model-provider/openai/OpenAICompletionModel.js +20 -4
  82. package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +19 -1
  83. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +6 -1
  84. package/model-provider/openai/chat/AbstractOpenAIChatModel.js +19 -1
  85. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +2 -2
  86. package/model-provider/openai/chat/OpenAIChatMessage.d.ts +2 -2
  87. package/model-provider/openai/chat/OpenAIChatModel.cjs +2 -3
  88. package/model-provider/openai/chat/OpenAIChatModel.d.ts +2 -2
  89. package/model-provider/openai/chat/OpenAIChatModel.js +2 -3
  90. package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs +0 -2
  91. package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +4 -4
  92. package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +0 -2
  93. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +2 -3
  94. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +2 -2
  95. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +2 -3
  96. package/model-provider/stability/StabilityImageGenerationModel.d.ts +6 -6
  97. package/package.json +2 -2
@@ -1,7 +1,8 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.chat = exports.instruction = exports.text = void 0;
4
- const ChatPrompt_js_1 = require("./ChatPrompt.cjs");
3
+ exports.validateLlama2Prompt = exports.chat = exports.instruction = exports.text = void 0;
4
+ const Content_js_1 = require("./Content.cjs");
5
+ const InvalidPromptError_js_1 = require("./InvalidPromptError.cjs");
5
6
  // see https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/generation.py#L44
6
7
  const BEGIN_SEGMENT = "<s>";
7
8
  const END_SEGMENT = " </s>";
@@ -47,9 +48,10 @@ function instruction() {
47
48
  return {
48
49
  stopSequences: [END_SEGMENT],
49
50
  format(prompt) {
51
+ const instruction = (0, Content_js_1.validateContentIsString)(prompt.instruction, prompt);
50
52
  return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system != null
51
53
  ? `${BEGIN_SYSTEM}${prompt.system}${END_SYSTEM}`
52
- : ""}${prompt.instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
54
+ : ""}${instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
53
55
  },
54
56
  };
55
57
  }
@@ -69,7 +71,7 @@ exports.instruction = instruction;
69
71
  function chat() {
70
72
  return {
71
73
  format(prompt) {
72
- (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
74
+ validateLlama2Prompt(prompt);
73
75
  let text = prompt.system != null
74
76
  ? // Separate section for system message to simplify implementation
75
77
  // (this is slightly different from the original instructions):
@@ -78,7 +80,8 @@ function chat() {
78
80
  for (const { role, content } of prompt.messages) {
79
81
  switch (role) {
80
82
  case "user": {
81
- text += `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${content}${END_INSTRUCTION}`;
83
+ const textContent = (0, Content_js_1.validateContentIsString)(content, prompt);
84
+ text += `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${textContent}${END_INSTRUCTION}`;
82
85
  break;
83
86
  }
84
87
  case "assistant": {
@@ -97,3 +100,31 @@ function chat() {
97
100
  };
98
101
  }
99
102
  exports.chat = chat;
103
+ /**
104
+ * Checks if a Llama2 chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
105
+ *
106
+ * - The first message of the chat must be a user message.
107
+ * - Then it must be alternating between an assistant message and a user message.
108
+ * - The last message must always be a user message (when submitting to a model).
109
+ *
110
+ * The type checking is done at runtime when you submit a chat prompt to a model with a prompt template.
111
+ *
112
+ * @throws {@link ChatPromptValidationError}
113
+ */
114
+ function validateLlama2Prompt(chatPrompt) {
115
+ const messages = chatPrompt.messages;
116
+ if (messages.length < 1) {
117
+ throw new InvalidPromptError_js_1.InvalidPromptError("ChatPrompt should have at least one message.", chatPrompt);
118
+ }
119
+ for (let i = 0; i < messages.length; i++) {
120
+ const expectedRole = i % 2 === 0 ? "user" : "assistant";
121
+ const role = messages[i].role;
122
+ if (role !== expectedRole) {
123
+ throw new InvalidPromptError_js_1.InvalidPromptError(`Message at index ${i} should have role '${expectedRole}', but has role '${role}'.`, chatPrompt);
124
+ }
125
+ }
126
+ if (messages.length % 2 === 0) {
127
+ throw new InvalidPromptError_js_1.InvalidPromptError("The last message must be a user message.", chatPrompt);
128
+ }
129
+ }
130
+ exports.validateLlama2Prompt = validateLlama2Prompt;
@@ -1,6 +1,6 @@
1
1
  import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
2
- import { TextChatPrompt } from "./ChatPrompt.js";
3
- import { TextInstructionPrompt } from "./InstructionPrompt.js";
2
+ import { ChatPrompt } from "./ChatPrompt.js";
3
+ import { InstructionPrompt } from "./InstructionPrompt.js";
4
4
  /**
5
5
  * Formats a text prompt as a Llama 2 prompt.
6
6
  *
@@ -27,7 +27,7 @@ export declare function text(): TextGenerationPromptTemplate<string, string>;
27
27
  *
28
28
  * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
29
29
  */
30
- export declare function instruction(): TextGenerationPromptTemplate<TextInstructionPrompt, string>;
30
+ export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, string>;
31
31
  /**
32
32
  * Formats a chat prompt as a Llama 2 prompt.
33
33
  *
@@ -40,4 +40,16 @@ export declare function instruction(): TextGenerationPromptTemplate<TextInstruct
40
40
  * ${ user msg 1 } [/INST] ${ model response 1 } </s><s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } </s><s>[INST] ${ user msg 3 } [/INST]
41
41
  * ```
42
42
  */
43
- export declare function chat(): TextGenerationPromptTemplate<TextChatPrompt, string>;
43
+ export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, string>;
44
+ /**
45
+ * Checks if a Llama2 chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
46
+ *
47
+ * - The first message of the chat must be a user message.
48
+ * - Then it must be alternating between an assistant message and a user message.
49
+ * - The last message must always be a user message (when submitting to a model).
50
+ *
51
+ * The type checking is done at runtime when you submit a chat prompt to a model with a prompt template.
52
+ *
53
+ * @throws {@link ChatPromptValidationError}
54
+ */
55
+ export declare function validateLlama2Prompt(chatPrompt: ChatPrompt): void;
@@ -1,4 +1,5 @@
1
- import { validateChatPrompt } from "./ChatPrompt.js";
1
+ import { validateContentIsString } from "./Content.js";
2
+ import { InvalidPromptError } from "./InvalidPromptError.js";
2
3
  // see https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/generation.py#L44
3
4
  const BEGIN_SEGMENT = "<s>";
4
5
  const END_SEGMENT = " </s>";
@@ -43,9 +44,10 @@ export function instruction() {
43
44
  return {
44
45
  stopSequences: [END_SEGMENT],
45
46
  format(prompt) {
47
+ const instruction = validateContentIsString(prompt.instruction, prompt);
46
48
  return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system != null
47
49
  ? `${BEGIN_SYSTEM}${prompt.system}${END_SYSTEM}`
48
- : ""}${prompt.instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
50
+ : ""}${instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
49
51
  },
50
52
  };
51
53
  }
@@ -64,7 +66,7 @@ export function instruction() {
64
66
  export function chat() {
65
67
  return {
66
68
  format(prompt) {
67
- validateChatPrompt(prompt);
69
+ validateLlama2Prompt(prompt);
68
70
  let text = prompt.system != null
69
71
  ? // Separate section for system message to simplify implementation
70
72
  // (this is slightly different from the original instructions):
@@ -73,7 +75,8 @@ export function chat() {
73
75
  for (const { role, content } of prompt.messages) {
74
76
  switch (role) {
75
77
  case "user": {
76
- text += `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${content}${END_INSTRUCTION}`;
78
+ const textContent = validateContentIsString(content, prompt);
79
+ text += `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${textContent}${END_INSTRUCTION}`;
77
80
  break;
78
81
  }
79
82
  case "assistant": {
@@ -91,3 +94,30 @@ export function chat() {
91
94
  stopSequences: [END_SEGMENT],
92
95
  };
93
96
  }
97
+ /**
98
+ * Checks if a Llama2 chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
99
+ *
100
+ * - The first message of the chat must be a user message.
101
+ * - Then it must be alternating between an assistant message and a user message.
102
+ * - The last message must always be a user message (when submitting to a model).
103
+ *
104
+ * The type checking is done at runtime when you submit a chat prompt to a model with a prompt template.
105
+ *
106
+ * @throws {@link ChatPromptValidationError}
107
+ */
108
+ export function validateLlama2Prompt(chatPrompt) {
109
+ const messages = chatPrompt.messages;
110
+ if (messages.length < 1) {
111
+ throw new InvalidPromptError("ChatPrompt should have at least one message.", chatPrompt);
112
+ }
113
+ for (let i = 0; i < messages.length; i++) {
114
+ const expectedRole = i % 2 === 0 ? "user" : "assistant";
115
+ const role = messages[i].role;
116
+ if (role !== expectedRole) {
117
+ throw new InvalidPromptError(`Message at index ${i} should have role '${expectedRole}', but has role '${role}'.`, chatPrompt);
118
+ }
119
+ }
120
+ if (messages.length % 2 === 0) {
121
+ throw new InvalidPromptError("The last message must be a user message.", chatPrompt);
122
+ }
123
+ }
@@ -1,7 +1,7 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.chat = exports.instruction = exports.text = void 0;
4
- const ChatPrompt_js_1 = require("./ChatPrompt.cjs");
4
+ const Content_js_1 = require("./Content.cjs");
5
5
  const roleNames = {
6
6
  system: "System",
7
7
  user: "User",
@@ -36,8 +36,9 @@ exports.text = text;
36
36
  const instruction = () => ({
37
37
  stopSequences: [],
38
38
  format(prompt) {
39
+ const instruction = (0, Content_js_1.validateContentIsString)(prompt.instruction, prompt);
39
40
  return (segment("system", prompt.system) +
40
- segment("user", prompt.instruction) +
41
+ segment("user", instruction) +
41
42
  segmentStart("assistant") +
42
43
  (prompt.responsePrefix ?? ""));
43
44
  },
@@ -53,12 +54,12 @@ exports.instruction = instruction;
53
54
  function chat() {
54
55
  return {
55
56
  format(prompt) {
56
- (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
57
57
  let text = prompt.system != null ? segment("system", prompt.system) : "";
58
58
  for (const { role, content } of prompt.messages) {
59
59
  switch (role) {
60
60
  case "user": {
61
- text += segment("user", content);
61
+ const textContent = (0, Content_js_1.validateContentIsString)(content, prompt);
62
+ text += segment("user", textContent);
62
63
  break;
63
64
  }
64
65
  case "assistant": {
@@ -1,6 +1,6 @@
1
1
  import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
2
- import { TextChatPrompt } from "./ChatPrompt.js";
3
- import { TextInstructionPrompt } from "./InstructionPrompt.js";
2
+ import { ChatPrompt } from "./ChatPrompt.js";
3
+ import { InstructionPrompt } from "./InstructionPrompt.js";
4
4
  /**
5
5
  * Formats a text prompt as a neural chat prompt.
6
6
  *
@@ -12,7 +12,7 @@ export declare function text(): TextGenerationPromptTemplate<string, string>;
12
12
  *
13
13
  * @see https://huggingface.co/Intel/neural-chat-7b-v3-1#prompt-template
14
14
  */
15
- export declare const instruction: () => TextGenerationPromptTemplate<TextInstructionPrompt, string>;
15
+ export declare const instruction: () => TextGenerationPromptTemplate<InstructionPrompt, string>;
16
16
  /**
17
17
  * Formats a chat prompt as a basic text prompt.
18
18
  *
@@ -20,4 +20,4 @@ export declare const instruction: () => TextGenerationPromptTemplate<TextInstruc
20
20
  * @param assistant The label of the assistant in the chat. Default to "assistant".
21
21
  * @param system The label of the system in the chat. Optional, defaults to no prefix.
22
22
  */
23
- export declare function chat(): TextGenerationPromptTemplate<TextChatPrompt, string>;
23
+ export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, string>;
@@ -1,4 +1,4 @@
1
- import { validateChatPrompt } from "./ChatPrompt.js";
1
+ import { validateContentIsString } from "./Content.js";
2
2
  const roleNames = {
3
3
  system: "System",
4
4
  user: "User",
@@ -32,8 +32,9 @@ export function text() {
32
32
  export const instruction = () => ({
33
33
  stopSequences: [],
34
34
  format(prompt) {
35
+ const instruction = validateContentIsString(prompt.instruction, prompt);
35
36
  return (segment("system", prompt.system) +
36
- segment("user", prompt.instruction) +
37
+ segment("user", instruction) +
37
38
  segmentStart("assistant") +
38
39
  (prompt.responsePrefix ?? ""));
39
40
  },
@@ -48,12 +49,12 @@ export const instruction = () => ({
48
49
  export function chat() {
49
50
  return {
50
51
  format(prompt) {
51
- validateChatPrompt(prompt);
52
52
  let text = prompt.system != null ? segment("system", prompt.system) : "";
53
53
  for (const { role, content } of prompt.messages) {
54
54
  switch (role) {
55
55
  case "user": {
56
- text += segment("user", content);
56
+ const textContent = validateContentIsString(content, prompt);
57
+ text += segment("user", textContent);
57
58
  break;
58
59
  }
59
60
  case "assistant": {
@@ -1,7 +1,7 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.chat = exports.instruction = exports.text = void 0;
4
- const ChatPrompt_js_1 = require("./ChatPrompt.cjs");
4
+ const Content_js_1 = require("./Content.cjs");
5
5
  /**
6
6
  * Formats a text prompt as a basic text prompt. Does not change the text prompt in any way.
7
7
  */
@@ -20,7 +20,7 @@ const instruction = () => ({
20
20
  if (prompt.system != null) {
21
21
  text += `${prompt.system}\n\n`;
22
22
  }
23
- text += `${prompt.instruction}\n\n`;
23
+ text += `${(0, Content_js_1.validateContentIsString)(prompt.instruction, prompt)}\n\n`;
24
24
  if (prompt.responsePrefix != null) {
25
25
  text += prompt.responsePrefix;
26
26
  }
@@ -37,14 +37,13 @@ exports.instruction = instruction;
37
37
  */
38
38
  const chat = ({ user = "user", assistant = "assistant", system, } = {}) => ({
39
39
  format(prompt) {
40
- (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
41
40
  let text = prompt.system != null
42
41
  ? `${system != null ? `${system}:` : ""}${prompt.system}\n\n`
43
42
  : "";
44
43
  for (const { role, content } of prompt.messages) {
45
44
  switch (role) {
46
45
  case "user": {
47
- text += `${user}:\n${content}\n\n`;
46
+ text += `${user}:\n${(0, Content_js_1.validateContentIsString)(content, prompt)}\n\n`;
48
47
  break;
49
48
  }
50
49
  case "assistant": {
@@ -1,6 +1,6 @@
1
1
  import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
2
- import { TextChatPrompt } from "./ChatPrompt.js";
3
- import { TextInstructionPrompt } from "./InstructionPrompt.js";
2
+ import { ChatPrompt } from "./ChatPrompt.js";
3
+ import { InstructionPrompt } from "./InstructionPrompt.js";
4
4
  /**
5
5
  * Formats a text prompt as a basic text prompt. Does not change the text prompt in any way.
6
6
  */
@@ -8,7 +8,7 @@ export declare const text: () => TextGenerationPromptTemplate<string, string>;
8
8
  /**
9
9
  * Formats an instruction prompt as a basic text prompt.
10
10
  */
11
- export declare const instruction: () => TextGenerationPromptTemplate<TextInstructionPrompt, string>;
11
+ export declare const instruction: () => TextGenerationPromptTemplate<InstructionPrompt, string>;
12
12
  /**
13
13
  * Formats a chat prompt as a basic text prompt.
14
14
  *
@@ -20,4 +20,4 @@ export declare const chat: (options?: {
20
20
  user?: string;
21
21
  assistant?: string;
22
22
  system?: string;
23
- }) => TextGenerationPromptTemplate<TextChatPrompt, string>;
23
+ }) => TextGenerationPromptTemplate<ChatPrompt, string>;
@@ -1,4 +1,4 @@
1
- import { validateChatPrompt } from "./ChatPrompt.js";
1
+ import { validateContentIsString } from "./Content.js";
2
2
  /**
3
3
  * Formats a text prompt as a basic text prompt. Does not change the text prompt in any way.
4
4
  */
@@ -16,7 +16,7 @@ export const instruction = () => ({
16
16
  if (prompt.system != null) {
17
17
  text += `${prompt.system}\n\n`;
18
18
  }
19
- text += `${prompt.instruction}\n\n`;
19
+ text += `${validateContentIsString(prompt.instruction, prompt)}\n\n`;
20
20
  if (prompt.responsePrefix != null) {
21
21
  text += prompt.responsePrefix;
22
22
  }
@@ -32,14 +32,13 @@ export const instruction = () => ({
32
32
  */
33
33
  export const chat = ({ user = "user", assistant = "assistant", system, } = {}) => ({
34
34
  format(prompt) {
35
- validateChatPrompt(prompt);
36
35
  let text = prompt.system != null
37
36
  ? `${system != null ? `${system}:` : ""}${prompt.system}\n\n`
38
37
  : "";
39
38
  for (const { role, content } of prompt.messages) {
40
39
  switch (role) {
41
40
  case "user": {
42
- text += `${user}:\n${content}\n\n`;
41
+ text += `${user}:\n${validateContentIsString(content, prompt)}\n\n`;
43
42
  break;
44
43
  }
45
44
  case "assistant": {
@@ -1,7 +1,7 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.chat = void 0;
4
- const ChatPrompt_js_1 = require("./ChatPrompt.cjs");
4
+ const Content_js_1 = require("./Content.cjs");
5
5
  // default Vicuna 1 system message
6
6
  const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
7
7
  "The assistant gives helpful, detailed, and polite answers to the user's questions.";
@@ -21,14 +21,14 @@ const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial
21
21
  function chat() {
22
22
  return {
23
23
  format(prompt) {
24
- (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
25
24
  let text = prompt.system != null
26
25
  ? `${prompt.system}\n\n`
27
26
  : `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
28
27
  for (const { role, content } of prompt.messages) {
29
28
  switch (role) {
30
29
  case "user": {
31
- text += `USER: ${content}\n`;
30
+ const textContent = (0, Content_js_1.validateContentIsString)(content, prompt);
31
+ text += `USER: ${textContent}\n`;
32
32
  break;
33
33
  }
34
34
  case "assistant": {
@@ -1,5 +1,5 @@
1
1
  import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
2
- import { TextChatPrompt } from "./ChatPrompt.js";
2
+ import { ChatPrompt } from "./ChatPrompt.js";
3
3
  /**
4
4
  * Formats a chat prompt as a Vicuna prompt.
5
5
  *
@@ -13,4 +13,4 @@ import { TextChatPrompt } from "./ChatPrompt.js";
13
13
  * ASSISTANT:
14
14
  * ```
15
15
  */
16
- export declare function chat(): TextGenerationPromptTemplate<TextChatPrompt, string>;
16
+ export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, string>;
@@ -1,4 +1,4 @@
1
- import { validateChatPrompt } from "./ChatPrompt.js";
1
+ import { validateContentIsString } from "./Content.js";
2
2
  // default Vicuna 1 system message
3
3
  const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
4
4
  "The assistant gives helpful, detailed, and polite answers to the user's questions.";
@@ -18,14 +18,14 @@ const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial
18
18
  export function chat() {
19
19
  return {
20
20
  format(prompt) {
21
- validateChatPrompt(prompt);
22
21
  let text = prompt.system != null
23
22
  ? `${prompt.system}\n\n`
24
23
  : `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
25
24
  for (const { role, content } of prompt.messages) {
26
25
  switch (role) {
27
26
  case "user": {
28
- text += `USER: ${content}\n`;
27
+ const textContent = validateContentIsString(content, prompt);
28
+ text += `USER: ${textContent}\n`;
29
29
  break;
30
30
  }
31
31
  case "assistant": {
@@ -1,7 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.trimChatPrompt = void 0;
4
- const ChatPrompt_js_1 = require("./ChatPrompt.cjs");
5
4
  /**
6
5
  * Keeps only the most recent messages in the prompt, while leaving enough space for the completion.
7
6
  *
@@ -14,7 +13,6 @@ const ChatPrompt_js_1 = require("./ChatPrompt.cjs");
14
13
  */
15
14
  async function trimChatPrompt({ prompt, model, tokenLimit = model.contextWindowSize -
16
15
  (model.settings.maxGenerationTokens ?? model.contextWindowSize / 4), }) {
17
- (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
18
16
  let minimalPrompt = {
19
17
  system: prompt.system,
20
18
  messages: [prompt.messages[prompt.messages.length - 1]], // last user message
@@ -1,5 +1,5 @@
1
1
  import { HasContextWindowSize, HasTokenizer, TextGenerationModel, TextGenerationModelSettings } from "../TextGenerationModel.js";
2
- import { TextChatPrompt } from "./ChatPrompt.js";
2
+ import { ChatPrompt } from "./ChatPrompt.js";
3
3
  /**
4
4
  * Keeps only the most recent messages in the prompt, while leaving enough space for the completion.
5
5
  *
@@ -11,7 +11,7 @@ import { TextChatPrompt } from "./ChatPrompt.js";
11
11
  * @see https://modelfusion.dev/guide/function/generate-text#limiting-the-chat-length
12
12
  */
13
13
  export declare function trimChatPrompt({ prompt, model, tokenLimit, }: {
14
- prompt: TextChatPrompt;
15
- model: TextGenerationModel<TextChatPrompt, TextGenerationModelSettings> & HasTokenizer<TextChatPrompt> & HasContextWindowSize;
14
+ prompt: ChatPrompt;
15
+ model: TextGenerationModel<ChatPrompt, TextGenerationModelSettings> & HasTokenizer<ChatPrompt> & HasContextWindowSize;
16
16
  tokenLimit?: number;
17
- }): Promise<TextChatPrompt>;
17
+ }): Promise<ChatPrompt>;
@@ -1,4 +1,3 @@
1
- import { validateChatPrompt } from "./ChatPrompt.js";
2
1
  /**
3
2
  * Keeps only the most recent messages in the prompt, while leaving enough space for the completion.
4
3
  *
@@ -11,7 +10,6 @@ import { validateChatPrompt } from "./ChatPrompt.js";
11
10
  */
12
11
  export async function trimChatPrompt({ prompt, model, tokenLimit = model.contextWindowSize -
13
12
  (model.settings.maxGenerationTokens ?? model.contextWindowSize / 4), }) {
14
- validateChatPrompt(prompt);
15
13
  let minimalPrompt = {
16
14
  system: prompt.system,
17
15
  messages: [prompt.messages[prompt.messages.length - 1]], // last user message
@@ -1,7 +1,7 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.chat = exports.instruction = exports.text = void 0;
4
- const ChatPrompt_js_1 = require("../../model-function/generate-text/prompt-template/ChatPrompt.cjs");
4
+ const Content_js_1 = require("../../model-function/generate-text/prompt-template/Content.cjs");
5
5
  const HUMAN_PREFIX = "\n\nHuman:";
6
6
  const ASSISTANT_PREFIX = "\n\nAssistant:";
7
7
  /**
@@ -26,9 +26,10 @@ exports.text = text;
26
26
  function instruction() {
27
27
  return {
28
28
  format(prompt) {
29
+ const instruction = (0, Content_js_1.validateContentIsString)(prompt.instruction, prompt);
29
30
  let text = prompt.system ?? "";
30
31
  text += HUMAN_PREFIX;
31
- text += prompt.instruction;
32
+ text += instruction;
32
33
  text += ASSISTANT_PREFIX;
33
34
  if (prompt.responsePrefix != null) {
34
35
  text += prompt.responsePrefix;
@@ -47,13 +48,13 @@ exports.instruction = instruction;
47
48
  function chat() {
48
49
  return {
49
50
  format(prompt) {
50
- (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
51
51
  let text = prompt.system ?? "";
52
52
  for (const { role, content } of prompt.messages) {
53
53
  switch (role) {
54
54
  case "user": {
55
+ const textContent = (0, Content_js_1.validateContentIsString)(content, prompt);
55
56
  text += HUMAN_PREFIX;
56
- text += content;
57
+ text += textContent;
57
58
  break;
58
59
  }
59
60
  case "assistant": {
@@ -1,6 +1,6 @@
1
1
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
2
- import { TextChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
3
- import { TextInstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
2
+ import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
3
+ import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
4
4
  /**
5
5
  * Formats a text prompt as an Anthropic prompt.
6
6
  */
@@ -8,10 +8,10 @@ export declare function text(): TextGenerationPromptTemplate<string, string>;
8
8
  /**
9
9
  * Formats an instruction prompt as an Anthropic prompt.
10
10
  */
11
- export declare function instruction(): TextGenerationPromptTemplate<TextInstructionPrompt, string>;
11
+ export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, string>;
12
12
  /**
13
13
  * Formats a chat prompt as an Anthropic prompt.
14
14
  *
15
15
  * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
16
16
  */
17
- export declare function chat(): TextGenerationPromptTemplate<TextChatPrompt, string>;
17
+ export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, string>;
@@ -1,4 +1,4 @@
1
- import { validateChatPrompt, } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
1
+ import { validateContentIsString } from "../../model-function/generate-text/prompt-template/Content.js";
2
2
  const HUMAN_PREFIX = "\n\nHuman:";
3
3
  const ASSISTANT_PREFIX = "\n\nAssistant:";
4
4
  /**
@@ -22,9 +22,10 @@ export function text() {
22
22
  export function instruction() {
23
23
  return {
24
24
  format(prompt) {
25
+ const instruction = validateContentIsString(prompt.instruction, prompt);
25
26
  let text = prompt.system ?? "";
26
27
  text += HUMAN_PREFIX;
27
- text += prompt.instruction;
28
+ text += instruction;
28
29
  text += ASSISTANT_PREFIX;
29
30
  if (prompt.responsePrefix != null) {
30
31
  text += prompt.responsePrefix;
@@ -42,13 +43,13 @@ export function instruction() {
42
43
  export function chat() {
43
44
  return {
44
45
  format(prompt) {
45
- validateChatPrompt(prompt);
46
46
  let text = prompt.system ?? "";
47
47
  for (const { role, content } of prompt.messages) {
48
48
  switch (role) {
49
49
  case "user": {
50
+ const textContent = validateContentIsString(content, prompt);
50
51
  text += HUMAN_PREFIX;
51
- text += content;
52
+ text += textContent;
52
53
  break;
53
54
  }
54
55
  case "assistant": {
@@ -4,12 +4,13 @@ exports.AnthropicTextGenerationResponseFormat = exports.AnthropicTextGenerationM
4
4
  const zod_1 = require("zod");
5
5
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
6
6
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
7
- const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
7
+ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
8
+ const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
8
9
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
9
10
  const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
11
+ const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
10
12
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
11
- const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
12
- const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
13
+ const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
13
14
  const AnthropicApiConfiguration_js_1 = require("./AnthropicApiConfiguration.cjs");
14
15
  const AnthropicError_js_1 = require("./AnthropicError.cjs");
15
16
  const AnthropicPromptTemplate_js_1 = require("./AnthropicPromptTemplate.cjs");
@@ -74,8 +75,8 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
74
75
  const abortSignal = options.run?.abortSignal;
75
76
  const userId = this.settings.userId;
76
77
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
77
- retry: this.settings.api?.retry,
78
- throttle: this.settings.api?.throttle,
78
+ retry: api.retry,
79
+ throttle: api.throttle,
79
80
  call: async () => {
80
81
  return (0, postToApi_js_1.postJsonToApi)({
81
82
  url: api.assembleUrl(`/complete`),
@@ -100,8 +101,7 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
100
101
  }
101
102
  get settingsForEvent() {
102
103
  const eventSettingProperties = [
103
- "maxGenerationTokens",
104
- "stopSequences",
104
+ ...TextGenerationModel_js_1.textGenerationModelProperties,
105
105
  "temperature",
106
106
  "topK",
107
107
  "topP",
@@ -116,9 +116,24 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
116
116
  });
117
117
  return {
118
118
  response,
119
- texts: [response.completion],
119
+ textGenerationResults: [
120
+ {
121
+ text: response.completion,
122
+ finishReason: this.translateFinishReason(response.stop_reason),
123
+ },
124
+ ],
120
125
  };
121
126
  }
127
+ translateFinishReason(finishReason) {
128
+ switch (finishReason) {
129
+ case "stop_sequence":
130
+ return "stop";
131
+ case "max_tokens":
132
+ return "length";
133
+ default:
134
+ return "unknown";
135
+ }
136
+ }
122
137
  doStreamText(prompt, options) {
123
138
  return this.callAPI(prompt, {
124
139
  ...options,