modelfusion 0.102.0 → 0.104.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. package/CHANGELOG.md +27 -0
  2. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +1 -1
  3. package/model-function/generate-text/TextGenerationModel.cjs +7 -0
  4. package/model-function/generate-text/TextGenerationModel.d.ts +3 -1
  5. package/model-function/generate-text/TextGenerationModel.js +6 -1
  6. package/model-function/generate-text/TextGenerationResult.cjs +2 -0
  7. package/model-function/generate-text/TextGenerationResult.d.ts +11 -0
  8. package/model-function/generate-text/TextGenerationResult.js +1 -0
  9. package/model-function/generate-text/generateText.cjs +14 -9
  10. package/model-function/generate-text/generateText.d.ts +3 -0
  11. package/model-function/generate-text/generateText.js +14 -9
  12. package/model-function/generate-text/index.cjs +1 -0
  13. package/model-function/generate-text/index.d.ts +1 -0
  14. package/model-function/generate-text/index.js +1 -0
  15. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +2 -1
  16. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +2 -2
  17. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +2 -1
  18. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +5 -4
  19. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts +4 -4
  20. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +5 -4
  21. package/model-function/generate-text/prompt-template/ChatPrompt.cjs +0 -24
  22. package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +11 -34
  23. package/model-function/generate-text/prompt-template/ChatPrompt.js +1 -22
  24. package/model-function/generate-text/prompt-template/Content.cjs +9 -0
  25. package/model-function/generate-text/prompt-template/Content.d.ts +9 -4
  26. package/model-function/generate-text/prompt-template/Content.js +7 -1
  27. package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +6 -22
  28. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +36 -5
  29. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +16 -4
  30. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +34 -4
  31. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +5 -4
  32. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts +4 -4
  33. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +5 -4
  34. package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +3 -4
  35. package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts +4 -4
  36. package/model-function/generate-text/prompt-template/TextPromptTemplate.js +3 -4
  37. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +3 -3
  38. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +2 -2
  39. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +3 -3
  40. package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +0 -2
  41. package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts +4 -4
  42. package/model-function/generate-text/prompt-template/trimChatPrompt.js +0 -2
  43. package/model-provider/anthropic/AnthropicPromptTemplate.cjs +5 -4
  44. package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +4 -4
  45. package/model-provider/anthropic/AnthropicPromptTemplate.js +5 -4
  46. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +23 -8
  47. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +8 -3
  48. package/model-provider/anthropic/AnthropicTextGenerationModel.js +24 -9
  49. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
  50. package/model-provider/cohere/CohereTextGenerationModel.cjs +22 -6
  51. package/model-provider/cohere/CohereTextGenerationModel.d.ts +8 -3
  52. package/model-provider/cohere/CohereTextGenerationModel.js +22 -6
  53. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +2 -2
  54. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +2 -2
  55. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +9 -8
  56. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -5
  57. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +9 -8
  58. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +23 -16
  59. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -4
  60. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +23 -16
  61. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +51 -51
  62. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +14 -11
  63. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +51 -51
  64. package/model-provider/mistral/MistralChatModel.cjs +19 -2
  65. package/model-provider/mistral/MistralChatModel.d.ts +8 -3
  66. package/model-provider/mistral/MistralChatModel.js +19 -2
  67. package/model-provider/mistral/MistralPromptTemplate.cjs +5 -4
  68. package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
  69. package/model-provider/mistral/MistralPromptTemplate.js +5 -4
  70. package/model-provider/ollama/OllamaChatModel.cjs +8 -3
  71. package/model-provider/ollama/OllamaChatModel.d.ts +6 -3
  72. package/model-provider/ollama/OllamaChatModel.js +8 -3
  73. package/model-provider/ollama/OllamaChatPromptTemplate.cjs +9 -13
  74. package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +4 -4
  75. package/model-provider/ollama/OllamaChatPromptTemplate.js +9 -13
  76. package/model-provider/ollama/OllamaCompletionModel.cjs +8 -3
  77. package/model-provider/ollama/OllamaCompletionModel.d.ts +4 -1
  78. package/model-provider/ollama/OllamaCompletionModel.js +8 -3
  79. package/model-provider/openai/OpenAICompletionModel.cjs +20 -4
  80. package/model-provider/openai/OpenAICompletionModel.d.ts +8 -3
  81. package/model-provider/openai/OpenAICompletionModel.js +20 -4
  82. package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +19 -1
  83. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +6 -1
  84. package/model-provider/openai/chat/AbstractOpenAIChatModel.js +19 -1
  85. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +2 -2
  86. package/model-provider/openai/chat/OpenAIChatMessage.d.ts +2 -2
  87. package/model-provider/openai/chat/OpenAIChatModel.cjs +2 -3
  88. package/model-provider/openai/chat/OpenAIChatModel.d.ts +2 -2
  89. package/model-provider/openai/chat/OpenAIChatModel.js +2 -3
  90. package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs +0 -2
  91. package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +4 -4
  92. package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +0 -2
  93. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +2 -3
  94. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +2 -2
  95. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +2 -3
  96. package/model-provider/stability/StabilityImageGenerationModel.d.ts +6 -6
  97. package/package.json +2 -2
package/CHANGELOG.md CHANGED
@@ -1,5 +1,32 @@
  # Changelog
 
+ ## v0.104.0 - 2023-12-24
+
+ ### Changed
+
+ - **breaking change**: Unified text and multi-modal prompt templates. `[Text/MultiModal]InstructionPrompt` is now `InstructionPrompt`, and `[Text/MultiModal]ChatPrompt` is now `ChatPrompt`.
+ - More flexible chat prompts: Chat prompt validation is now chat-template-specific and happens at runtime. E.g. the Llama2 prompt template only supports alternating turns of user and assistant messages, whereas other formats are more flexible.
+
+ ## v0.103.0 - 2023-12-23
+
+ ### Added
+
+ - `finishReason` support for `generateText`.
+
+   The finish reason can be `stop` (the model generated a stop sequence), `length` (the model reached the maximum number of tokens), `content-filter` (the content filter detected a violation), `tool-calls` (the model triggered a tool call), `error` (the model stopped because of an error), `other` (the model stopped for another reason), or `unknown` (the stop reason is not known or the model does not support finish reasons).
+
+   You can extract it from the full response when using `fullResponse: true`:
+
+   ```ts
+   const { text, finishReason } = await generateText(
+     openai
+       .ChatTextGenerator({ model: "gpt-3.5-turbo", maxGenerationTokens: 200 })
+       .withTextPrompt(),
+     "Write a short story about a robot learning to love:",
+     { fullResponse: true }
+   );
+   ```
+
  ## v0.102.0 - 2023-12-22
 
  ### Added
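To make the v0.104.0 breaking change concrete, here is a minimal sketch of prompts written against the unified types. It assumes `InstructionPrompt` is re-exported from the package root (as the index changes below suggest); the `base64Image` field name on the image part is an assumption, since this diff elides it.

```ts
import type { InstructionPrompt } from "modelfusion";

// Text-only instruction: `instruction` is a plain string.
const textPrompt: InstructionPrompt = {
  system: "You are a helpful assistant.",
  instruction: "Summarize the plot of Hamlet in two sentences.",
};

// Multi-modal instruction: `instruction` is an array of text and image parts.
// Text-only templates (e.g. the Alpaca template below) reject this at runtime.
const multiModalPrompt: InstructionPrompt = {
  instruction: [
    { type: "text", text: "Describe this image:" },
    { type: "image", base64Image: "iVBORw0KGgo", mimeType: "image/png" }, // field name assumed
  ],
};
```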
package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts CHANGED
@@ -19,7 +19,7 @@ export declare class PromptTemplateTextGenerationModel<PROMPT, MODEL_PROMPT, SET
      get countPromptTokens(): MODEL["countPromptTokens"] extends undefined ? undefined : (prompt: PROMPT) => PromiseLike<number>;
      doGenerateTexts(prompt: PROMPT, options?: FunctionOptions): PromiseLike<{
          response: unknown;
-         texts: string[];
+         textGenerationResults: import("./TextGenerationResult.js").TextGenerationResult[];
          usage?: {
              promptTokens: number;
              completionTokens: number;
package/model-function/generate-text/TextGenerationModel.cjs CHANGED
@@ -1,2 +1,9 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
+ exports.textGenerationModelProperties = void 0;
+ exports.textGenerationModelProperties = [
+     "maxGenerationTokens",
+     "stopSequences",
+     "numberOfGenerations",
+     "trimWhitespace",
+ ];
package/model-function/generate-text/TextGenerationModel.d.ts CHANGED
@@ -3,6 +3,8 @@ import { Delta } from "../Delta.js";
  import { Model, ModelSettings } from "../Model.js";
  import { BasicTokenizer, FullTokenizer } from "../tokenize-text/Tokenizer.js";
  import { TextGenerationPromptTemplate } from "./TextGenerationPromptTemplate.js";
+ import { TextGenerationResult } from "./TextGenerationResult.js";
+ export declare const textGenerationModelProperties: readonly ["maxGenerationTokens", "stopSequences", "numberOfGenerations", "trimWhitespace"];
  export interface TextGenerationModelSettings extends ModelSettings {
      /**
       * Specifies the maximum number of tokens (words, punctuation, parts of words) that the model can generate in a single response.
@@ -62,7 +64,7 @@ export interface TextGenerationModel<PROMPT, SETTINGS extends TextGenerationMode
      readonly countPromptTokens: ((prompt: PROMPT) => PromiseLike<number>) | undefined;
      doGenerateTexts(prompt: PROMPT, options?: FunctionOptions): PromiseLike<{
          response: unknown;
-         texts: string[];
+         textGenerationResults: TextGenerationResult[];
          usage?: {
              promptTokens: number;
              completionTokens: number;
package/model-function/generate-text/TextGenerationModel.js CHANGED
@@ -1 +1,6 @@
- export {};
+ export const textGenerationModelProperties = [
+     "maxGenerationTokens",
+     "stopSequences",
+     "numberOfGenerations",
+     "trimWhitespace",
+ ];
package/model-function/generate-text/TextGenerationResult.cjs CHANGED
@@ -0,0 +1,2 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
package/model-function/generate-text/TextGenerationResult.d.ts CHANGED
@@ -0,0 +1,11 @@
+ export type TextGenerationResult = {
+     /**
+      * The generated text.
+      */
+     text: string;
+     /**
+      * The reason why the generation stopped.
+      */
+     finishReason: TextGenerationFinishReason;
+ };
+ export type TextGenerationFinishReason = "stop" | "length" | "content-filter" | "tool-calls" | "error" | "other" | "unknown";
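The `TextGenerationResult` type above is what every `doGenerateTexts` implementation now returns. To illustrate the mapping work the provider adapters in this diff perform, here is a hedged sketch that converts a hypothetical raw provider response into this shape; the raw response format and helper names are illustrative assumptions, not modelfusion APIs, and the type imports assume re-export from the package root.

```ts
import type {
  TextGenerationFinishReason,
  TextGenerationResult,
} from "modelfusion";

// Hypothetical raw choice shape from a provider API (not a modelfusion type):
type RawChoice = { text: string; finish_reason: string };

// Map a provider-specific finish reason onto the unified enum.
function mapFinishReason(rawReason: string): TextGenerationFinishReason {
  switch (rawReason) {
    case "stop":
      return "stop";
    case "length":
      return "length";
    case "content_filter":
      return "content-filter";
    default:
      return "unknown";
  }
}

// Convert raw choices into the TextGenerationResult[] shape that
// doGenerateTexts implementations now return.
function toTextGenerationResults(choices: RawChoice[]): TextGenerationResult[] {
  return choices.map((choice) => ({
    text: choice.text,
    finishReason: mapFinishReason(choice.finish_reason),
  }));
}
```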
package/model-function/generate-text/generateText.cjs CHANGED
@@ -11,25 +11,30 @@ async function generateText(model, prompt, options) {
          generateResponse: async (options) => {
              const result = await model.doGenerateTexts(prompt, options);
              const shouldTrimWhitespace = model.settings.trimWhitespace ?? true;
-             const texts = shouldTrimWhitespace
-                 ? result.texts.map((text) => text.trim())
-                 : result.texts;
+             const textGenerationResults = shouldTrimWhitespace
+                 ? result.textGenerationResults.map((textGeneration) => ({
+                     text: textGeneration.text.trim(),
+                     finishReason: textGeneration.finishReason,
+                 }))
+                 : result.textGenerationResults;
              return {
                  response: result.response,
-                 extractedValue: texts,
+                 extractedValue: textGenerationResults,
                  usage: result.usage,
              };
          },
      });
-     const texts = fullResponse.value;
-     const text = texts[0];
+     const textGenerationResults = fullResponse.value;
+     const firstResult = textGenerationResults[0];
      return options?.fullResponse
          ? {
-             text,
-             texts,
+             text: firstResult.text,
+             finishReason: firstResult.finishReason,
+             texts: textGenerationResults.map((textGeneration) => textGeneration.text),
+             textGenerationResults,
              response: fullResponse.response,
              metadata: fullResponse.metadata,
          }
-         : text;
+         : firstResult.text;
  }
  exports.generateText = generateText;
package/model-function/generate-text/generateText.d.ts CHANGED
@@ -1,6 +1,7 @@
  import { FunctionOptions } from "../../core/FunctionOptions.js";
  import { ModelCallMetadata } from "../ModelCallMetadata.js";
  import { TextGenerationModel, TextGenerationModelSettings } from "./TextGenerationModel.js";
+ import { TextGenerationFinishReason, TextGenerationResult } from "./TextGenerationResult.js";
  /**
   * Generate text for a prompt and return it as a string.
   *
@@ -29,7 +30,9 @@ export declare function generateText<PROMPT>(model: TextGenerationModel<PROMPT,
      fullResponse: true;
  }): Promise<{
      text: string;
+     finishReason: TextGenerationFinishReason;
      texts: string[];
+     textGenerationResults: TextGenerationResult[];
      response: unknown;
      metadata: ModelCallMetadata;
  }>;
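A usage sketch of the widened `fullResponse` result declared above. The model setup follows the changelog example; `numberOfGenerations` is one of the shared settings introduced in `textGenerationModelProperties`, and requesting two generations here assumes the OpenAI adapter passes it through.

```ts
import { generateText, openai } from "modelfusion";

const { text, finishReason, texts, textGenerationResults } =
  await generateText(
    openai
      .ChatTextGenerator({
        model: "gpt-3.5-turbo",
        maxGenerationTokens: 200,
        numberOfGenerations: 2, // adapter support assumed
      })
      .withTextPrompt(),
    "Write a haiku about refactoring:",
    { fullResponse: true }
  );

// `text` and `finishReason` describe the first generation;
// `texts` and `textGenerationResults` cover all generations.
for (const result of textGenerationResults) {
  console.log(result.finishReason, result.text);
}
```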
package/model-function/generate-text/generateText.js CHANGED
@@ -8,24 +8,29 @@ export async function generateText(model, prompt, options) {
          generateResponse: async (options) => {
              const result = await model.doGenerateTexts(prompt, options);
              const shouldTrimWhitespace = model.settings.trimWhitespace ?? true;
-             const texts = shouldTrimWhitespace
-                 ? result.texts.map((text) => text.trim())
-                 : result.texts;
+             const textGenerationResults = shouldTrimWhitespace
+                 ? result.textGenerationResults.map((textGeneration) => ({
+                     text: textGeneration.text.trim(),
+                     finishReason: textGeneration.finishReason,
+                 }))
+                 : result.textGenerationResults;
              return {
                  response: result.response,
-                 extractedValue: texts,
+                 extractedValue: textGenerationResults,
                  usage: result.usage,
              };
          },
      });
-     const texts = fullResponse.value;
-     const text = texts[0];
+     const textGenerationResults = fullResponse.value;
+     const firstResult = textGenerationResults[0];
      return options?.fullResponse
          ? {
-             text,
-             texts,
+             text: firstResult.text,
+             finishReason: firstResult.finishReason,
+             texts: textGenerationResults.map((textGeneration) => textGeneration.text),
+             textGenerationResults,
              response: fullResponse.response,
              metadata: fullResponse.metadata,
          }
-         : text;
+         : firstResult.text;
  }
package/model-function/generate-text/index.cjs CHANGED
@@ -19,6 +19,7 @@ __exportStar(require("./PromptTemplateTextStreamingModel.cjs"), exports);
  __exportStar(require("./TextGenerationEvent.cjs"), exports);
  __exportStar(require("./TextGenerationModel.cjs"), exports);
  __exportStar(require("./TextGenerationPromptTemplate.cjs"), exports);
+ __exportStar(require("./TextGenerationResult.cjs"), exports);
  __exportStar(require("./generateText.cjs"), exports);
  __exportStar(require("./prompt-template/index.cjs"), exports);
  __exportStar(require("./streamText.cjs"), exports);
package/model-function/generate-text/index.d.ts CHANGED
@@ -3,6 +3,7 @@ export * from "./PromptTemplateTextStreamingModel.js";
  export * from "./TextGenerationEvent.js";
  export * from "./TextGenerationModel.js";
  export * from "./TextGenerationPromptTemplate.js";
+ export * from "./TextGenerationResult.js";
  export * from "./generateText.js";
  export * from "./prompt-template/index.js";
  export * from "./streamText.js";
package/model-function/generate-text/index.js CHANGED
@@ -3,6 +3,7 @@ export * from "./PromptTemplateTextStreamingModel.js";
  export * from "./TextGenerationEvent.js";
  export * from "./TextGenerationModel.js";
  export * from "./TextGenerationPromptTemplate.js";
+ export * from "./TextGenerationResult.js";
  export * from "./generateText.js";
  export * from "./prompt-template/index.js";
  export * from "./streamText.js";
package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs CHANGED
@@ -1,6 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.instruction = exports.text = void 0;
+ const Content_js_1 = require("./Content.cjs");
  const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
  const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
  /**
@@ -67,7 +68,7 @@ function instruction() {
              if (prompt.system != null) {
                  text += `${prompt.system}\n`;
              }
-             text += prompt.instruction;
+             text += (0, Content_js_1.validateContentIsString)(prompt.instruction, prompt);
              if (prompt.input != null) {
                  text += `\n\n### Input:\n${prompt.input}`;
              }
package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts CHANGED
@@ -1,5 +1,5 @@
  import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
- import { TextInstructionPrompt } from "./InstructionPrompt.js";
+ import { InstructionPrompt } from "./InstructionPrompt.js";
  /**
   * Formats a text prompt as an Alpaca prompt.
   */
@@ -40,7 +40,7 @@ export declare function text(): TextGenerationPromptTemplate<string, string>;
   *
   * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
   */
- export declare function instruction(): TextGenerationPromptTemplate<TextInstructionPrompt & {
+ export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt & {
      input?: string;
  }, // optional input supported by Alpaca
  string>;
package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js CHANGED
@@ -1,3 +1,4 @@
+ import { validateContentIsString } from "./Content.js";
  const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
  const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
  /**
@@ -63,7 +64,7 @@ export function instruction() {
              if (prompt.system != null) {
                  text += `${prompt.system}\n`;
              }
-             text += prompt.instruction;
+             text += validateContentIsString(prompt.instruction, prompt);
              if (prompt.input != null) {
                  text += `\n\n### Input:\n${prompt.input}`;
              }
package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs CHANGED
@@ -1,7 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.chat = exports.instruction = exports.text = void 0;
- const ChatPrompt_js_1 = require("./ChatPrompt.cjs");
+ const Content_js_1 = require("./Content.cjs");
  const START_SEGMENT = "<|im_start|>";
  const END_SEGMENT = "<|im_end|>";
  function segmentStart(role) {
@@ -40,8 +40,9 @@ function instruction() {
      return {
          stopSequences: [END_SEGMENT],
          format(prompt) {
+             const instruction = (0, Content_js_1.validateContentIsString)(prompt.instruction, prompt);
              return (segment("system", prompt.system) +
-                 segment("user", prompt.instruction) +
+                 segment("user", instruction) +
                  segmentStart("assistant") +
                  (prompt.responsePrefix ?? ""));
          },
@@ -64,12 +65,12 @@ exports.instruction = instruction;
  function chat() {
      return {
          format(prompt) {
-             (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
              let text = prompt.system != null ? segment("system", prompt.system) : "";
              for (const { role, content } of prompt.messages) {
                  switch (role) {
                      case "user": {
-                         text += segment("user", content);
+                         const textContent = (0, Content_js_1.validateContentIsString)(content, prompt);
+                         text += segment("user", textContent);
                          break;
                      }
                      case "assistant": {
package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts CHANGED
@@ -1,6 +1,6 @@
  import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
- import { TextChatPrompt } from "./ChatPrompt.js";
- import { TextInstructionPrompt } from "./InstructionPrompt.js";
+ import { ChatPrompt } from "./ChatPrompt.js";
+ import { InstructionPrompt } from "./InstructionPrompt.js";
  /**
   * Formats a text prompt using the ChatML format.
   */
@@ -18,7 +18,7 @@ export declare function text(): TextGenerationPromptTemplate<string, string>;
   * ${response prefix}
   * ```
   */
- export declare function instruction(): TextGenerationPromptTemplate<TextInstructionPrompt, string>;
+ export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, string>;
  /**
   * Formats a chat prompt using the ChatML format.
   *
@@ -32,4 +32,4 @@ export declare function instruction(): TextGenerationPromptTemplate<TextInstruct
   * Paris<|im_end|>
   * ```
   */
- export declare function chat(): TextGenerationPromptTemplate<TextChatPrompt, string>;
+ export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, string>;
package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js CHANGED
@@ -1,4 +1,4 @@
- import { validateChatPrompt } from "./ChatPrompt.js";
+ import { validateContentIsString } from "./Content.js";
  const START_SEGMENT = "<|im_start|>";
  const END_SEGMENT = "<|im_end|>";
  function segmentStart(role) {
@@ -36,8 +36,9 @@ export function instruction() {
      return {
          stopSequences: [END_SEGMENT],
          format(prompt) {
+             const instruction = validateContentIsString(prompt.instruction, prompt);
              return (segment("system", prompt.system) +
-                 segment("user", prompt.instruction) +
+                 segment("user", instruction) +
                  segmentStart("assistant") +
                  (prompt.responsePrefix ?? ""));
          },
@@ -59,12 +60,12 @@ export function instruction() {
  export function chat() {
      return {
          format(prompt) {
-             validateChatPrompt(prompt);
              let text = prompt.system != null ? segment("system", prompt.system) : "";
              for (const { role, content } of prompt.messages) {
                  switch (role) {
                      case "user": {
-                         text += segment("user", content);
+                         const textContent = validateContentIsString(content, prompt);
+                         text += segment("user", textContent);
                          break;
                      }
                      case "assistant": {
package/model-function/generate-text/prompt-template/ChatPrompt.cjs CHANGED
@@ -1,26 +1,2 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.validateChatPrompt = void 0;
- const InvalidPromptError_js_1 = require("./InvalidPromptError.cjs");
- /**
-  * Checks if a chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
-  *
-  * @throws {@link ChatPromptValidationError}
-  */
- function validateChatPrompt(chatPrompt) {
-     const messages = chatPrompt.messages;
-     if (messages.length < 1) {
-         throw new InvalidPromptError_js_1.InvalidPromptError("ChatPrompt should have at least one message.", chatPrompt);
-     }
-     for (let i = 0; i < messages.length; i++) {
-         const expectedRole = i % 2 === 0 ? "user" : "assistant";
-         const role = messages[i].role;
-         if (role !== expectedRole) {
-             throw new InvalidPromptError_js_1.InvalidPromptError(`Message at index ${i} should have role '${expectedRole}', but has role '${role}'.`, chatPrompt);
-         }
-     }
-     if (messages.length % 2 === 0) {
-         throw new InvalidPromptError_js_1.InvalidPromptError("The last message must be a user message.", chatPrompt);
-     }
- }
- exports.validateChatPrompt = validateChatPrompt;
package/model-function/generate-text/prompt-template/ChatPrompt.d.ts CHANGED
@@ -1,15 +1,10 @@
- import { MultiModalInput } from "./Content.js";
+ import { Content } from "./Content.js";
  /**
-  * A textual chat prompt is a combination of a system message and a list of messages with the following constraints:
+  * A chat prompt is a combination of a system message and a list of user and assistant messages.
   *
-  * - A chat prompt can optionally have a system message.
-  * - The first message of the chat must be a user message.
-  * - Then it must be alternating between an assistant message and a user message.
-  * - The last message must always be a user message (when submitting to a model).
+  * The user messages can contain multi-modal content.
   *
-  * You can use a ChatPrompt without an final user message when you e.g. want to display the current state of a conversation.
-  *
-  * The type checking is done at runtime when you submit a chat prompt to a model with a prompt template.
+  * Note: Not all models and prompt formats support multi-modal inputs.
   *
   * @example
   * ```ts
@@ -22,38 +17,20 @@ import { MultiModalInput } from "./Content.js";
   * ],
   * };
   * ```
-  *
-  * @see validateChatPrompt
   */
- export interface TextChatPrompt {
+ export interface ChatPrompt {
      system?: string;
-     messages: Array<TextChatMessage>;
+     messages: Array<ChatMessage>;
  }
  /**
-  * A text message in a chat prompt.
-  * @see TextChatPrompt
+  * A message in a chat prompt.
+  *
+  * @see ChatPrompt
   */
- export type TextChatMessage = {
-     role: "user";
-     content: string;
- } | {
-     role: "assistant";
-     content: string;
- };
- export interface MultiModalChatPrompt {
-     system?: string;
-     messages: Array<MultiModalChatMessage>;
- }
- export type MultiModalChatMessage = {
+ export type ChatMessage = {
      role: "user";
-     content: MultiModalInput;
+     content: Content;
  } | {
      role: "assistant";
      content: string;
  };
- /**
-  * Checks if a chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
-  *
-  * @throws {@link ChatPromptValidationError}
-  */
- export declare function validateChatPrompt(chatPrompt: TextChatPrompt | MultiModalChatPrompt): void;
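A short sketch of a prompt built against the unified `ChatPrompt` type above, mixing a plain-text user message with a multi-modal one. The `base64Image` field name on the image part is an assumption (this diff elides it), and whether a given template accepts the multi-modal message is now checked at runtime rather than by the type system.

```ts
import type { ChatPrompt } from "modelfusion";

const imageData = "iVBORw0KGgo"; // base-64 encoded image data (shortened)

const chat: ChatPrompt = {
  system: "You are a helpful assistant.",
  messages: [
    // Plain-text user message.
    { role: "user", content: "What is in this picture?" },
    { role: "assistant", content: "Please attach the picture." },
    {
      role: "user",
      // Multi-modal user message: an array of text and image parts.
      content: [
        { type: "text", text: "Here it is:" },
        { type: "image", base64Image: imageData, mimeType: "image/png" }, // field name assumed
      ],
    },
  ],
};
```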
package/model-function/generate-text/prompt-template/ChatPrompt.js CHANGED
@@ -1,22 +1 @@
- import { InvalidPromptError } from "./InvalidPromptError.js";
- /**
-  * Checks if a chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
-  *
-  * @throws {@link ChatPromptValidationError}
-  */
- export function validateChatPrompt(chatPrompt) {
-     const messages = chatPrompt.messages;
-     if (messages.length < 1) {
-         throw new InvalidPromptError("ChatPrompt should have at least one message.", chatPrompt);
-     }
-     for (let i = 0; i < messages.length; i++) {
-         const expectedRole = i % 2 === 0 ? "user" : "assistant";
-         const role = messages[i].role;
-         if (role !== expectedRole) {
-             throw new InvalidPromptError(`Message at index ${i} should have role '${expectedRole}', but has role '${role}'.`, chatPrompt);
-         }
-     }
-     if (messages.length % 2 === 0) {
-         throw new InvalidPromptError("The last message must be a user message.", chatPrompt);
-     }
- }
+ export {};
package/model-function/generate-text/prompt-template/Content.cjs CHANGED
@@ -1,2 +1,11 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
+ exports.validateContentIsString = void 0;
+ const InvalidPromptError_js_1 = require("./InvalidPromptError.cjs");
+ function validateContentIsString(content, prompt) {
+     if (typeof content !== "string") {
+         throw new InvalidPromptError_js_1.InvalidPromptError("only text prompts are supported by this prompt template", prompt);
+     }
+     return content;
+ }
+ exports.validateContentIsString = validateContentIsString;
package/model-function/generate-text/prompt-template/Content.d.ts CHANGED
@@ -1,13 +1,17 @@
- export type MultiModalInput = Array<Content>;
- export type Content = TextContent | ImageContent;
- export interface TextContent {
+ /**
+  * Content can either be simple text content (a `string`) or
+  * complex multi-modal content that is a mix of text parts and
+  * image parts.
+  */
+ export type Content = string | Array<TextPart | ImagePart>;
+ export interface TextPart {
      type: "text";
      /**
       * The text content.
       */
      text: string;
  }
- export interface ImageContent {
+ export interface ImagePart {
      type: "image";
      /**
       * Base-64 encoded image.
@@ -18,3 +22,4 @@ export interface ImageContent {
       */
      mimeType?: string;
  }
+ export declare function validateContentIsString(content: Content, prompt: unknown): string;
package/model-function/generate-text/prompt-template/Content.js CHANGED
@@ -1 +1,7 @@
- export {};
+ import { InvalidPromptError } from "./InvalidPromptError.js";
+ export function validateContentIsString(content, prompt) {
+     if (typeof content !== "string") {
+         throw new InvalidPromptError("only text prompts are supported by this prompt template", prompt);
+     }
+     return content;
+ }
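The `validateContentIsString` helper added above is how text-only templates accept the unified `Content` type but reject image parts at runtime, replacing the old compile-time split between text and multi-modal prompt types. Below is a minimal sketch of a custom text-only instruction template built on it; the template shape follows the `{ stopSequences, format }` objects seen in the ChatML diffs above, but the root-level import paths and the `plainTextInstruction` name are assumptions for illustration.

```ts
import {
  validateContentIsString,
  type InstructionPrompt,
  type TextGenerationPromptTemplate,
} from "modelfusion";

// A hypothetical text-only instruction template. It accepts the unified
// InstructionPrompt, but throws InvalidPromptError at runtime when the
// instruction contains multi-modal parts.
function plainTextInstruction(): TextGenerationPromptTemplate<
  InstructionPrompt,
  string
> {
  return {
    stopSequences: [],
    format(prompt) {
      // Throws for non-string (multi-modal) instructions.
      const instruction = validateContentIsString(prompt.instruction, prompt);
      const system = prompt.system != null ? `${prompt.system}\n\n` : "";
      return `${system}${instruction}\n${prompt.responsePrefix ?? ""}`;
    },
  };
}
```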
package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts CHANGED
@@ -1,24 +1,10 @@
- import { MultiModalInput } from "./Content.js";
- /**
-  * A single multi-modal instruction prompt. It can contain an optional system message to define
-  * the role and behavior of the language model.
-  * The instruction is a multi-model input (`array` of content).
-  */
- export interface MultiModalInstructionPrompt {
-     /**
-      * Optional system message to provide context for the language model. Note that for some models,
-      * changing the system message can impact the results, because the model may be trained on the default system message.
-      */
-     system?: string;
-     /**
-      * The multi-modal instruction for the model.
-      */
-     instruction: MultiModalInput;
- }
+ import { Content } from "./Content.js";
  /**
   * A single text instruction prompt. It can contain an optional system message to define
   * the role and behavior of the language model.
   *
+  * The instruction can be a text instruction or a multi-modal instruction.
+  *
   * @example
   * ```ts
   * {
@@ -27,21 +13,19 @@ export interface MultiModalInstructionPrompt {
   * }
   * ```
   */
- export interface TextInstructionPrompt {
+ export interface InstructionPrompt {
      /**
       * Optional system message to provide context for the language model. Note that for some models,
       * changing the system message can impact the results, because the model may be trained on the default system message.
       */
      system?: string;
      /**
-      * The text instruction for the model.
+      * The instruction for the model.
       */
-     instruction: string;
+     instruction: Content;
      /**
       * Response prefix that will be injected in the prompt at the beginning of the response.
       * This is useful for guiding the model by starting its response with a specific text.
-      *
-      * Note: Not all models support this feature. E.g. it is not supported by OpenAI chat models.
       */
      responsePrefix?: string;
  }
  }