modelfusion 0.106.0 → 0.107.0

Files changed (55)
  1. package/CHANGELOG.md +15 -0
  2. package/README.md +8 -49
  3. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.cjs +11 -0
  4. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.js +11 -0
  5. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +9 -7
  6. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +9 -7
  7. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.cjs +11 -0
  8. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.js +11 -0
  9. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.cjs +150 -0
  10. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.d.ts +62 -0
  11. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.js +143 -0
  12. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.cjs +60 -0
  13. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.js +58 -0
  14. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.cjs +11 -0
  15. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.js +11 -0
  16. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.cjs +11 -0
  17. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.js +11 -0
  18. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +11 -0
  19. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +11 -0
  20. package/model-function/generate-text/prompt-template/index.cjs +2 -1
  21. package/model-function/generate-text/prompt-template/index.d.ts +1 -0
  22. package/model-function/generate-text/prompt-template/index.js +1 -0
  23. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +3 -3
  24. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.cjs → LlamaCppCompletionModel.cjs} +8 -8
  25. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.d.ts → LlamaCppCompletionModel.d.ts} +26 -26
  26. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.js → LlamaCppCompletionModel.js} +6 -6
  27. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.test.cjs → LlamaCppCompletionModel.test.cjs} +3 -3
  28. package/model-provider/llamacpp/LlamaCppCompletionModel.test.d.ts +1 -0
  29. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.test.js → LlamaCppCompletionModel.test.js} +3 -3
  30. package/model-provider/llamacpp/LlamaCppFacade.cjs +2 -2
  31. package/model-provider/llamacpp/LlamaCppFacade.d.ts +2 -2
  32. package/model-provider/llamacpp/LlamaCppFacade.js +2 -2
  33. package/model-provider/llamacpp/index.cjs +1 -1
  34. package/model-provider/llamacpp/index.d.ts +1 -1
  35. package/model-provider/llamacpp/index.js +1 -1
  36. package/model-provider/mistral/MistralChatModel.cjs +4 -4
  37. package/model-provider/mistral/MistralChatModel.d.ts +6 -6
  38. package/model-provider/mistral/MistralChatModel.js +1 -1
  39. package/model-provider/mistral/index.cjs +3 -3
  40. package/model-provider/mistral/index.d.ts +2 -2
  41. package/model-provider/mistral/index.js +2 -2
  42. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +8 -8
  43. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
  44. package/model-provider/openai/OpenAICompletionModel.d.ts +6 -6
  45. package/package.json +1 -1
  46. package/tool/generate-tool-call/index.cjs +1 -0
  47. package/tool/generate-tool-call/index.d.ts +1 -0
  48. package/tool/generate-tool-call/index.js +1 -0
  49. package/tool/generate-tool-call/jsonToolCallPrompt.cjs +30 -0
  50. package/tool/generate-tool-call/jsonToolCallPrompt.d.ts +5 -0
  51. package/tool/generate-tool-call/jsonToolCallPrompt.js +27 -0
  52. /package/{model-provider/llamacpp/LlamaCppTextGenerationModel.test.d.ts → model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.d.ts} +0 -0
  53. /package/model-provider/mistral/{MistralPromptTemplate.cjs → MistralChatPromptTemplate.cjs} +0 -0
  54. /package/model-provider/mistral/{MistralPromptTemplate.d.ts → MistralChatPromptTemplate.d.ts} +0 -0
  55. /package/model-provider/mistral/{MistralPromptTemplate.js → MistralChatPromptTemplate.js} +0 -0
package/CHANGELOG.md CHANGED
@@ -1,5 +1,20 @@
  # Changelog

+ ## v0.107.0 - 2023-12-29
+
+ ### Added
+
+ - Mistral instruct prompt template
+
+ ### Changed
+
+ - **breaking change**: Renamed `LlamaCppTextGenerationModel` to `LlamaCppCompletionModel`.
+
+ ### Fixed
+
+ - Updated `LlamaCppCompletionModel` to the latest llama.cpp version.
+ - Fixed formatting of system prompt for chats in Llama2 prompt template.
+
  ## v0.106.0 - 2023-12-28

  Experimental features that are unlikely to become stable before v1.0 have been moved to a separate `modelfusion-experimental` package.
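For orientation, here is a minimal TypeScript sketch of the new Mistral instruct prompt template in use. The `instruction()` and `chat()` calls and the resulting prompt strings follow the MistralInstructPromptTemplate diffs further down; the relative import path is an assumption made purely for illustration.

```ts
// Illustration only: the import path is hypothetical. In the published package the
// functions are defined in
// model-function/generate-text/prompt-template/MistralInstructPromptTemplate.js.
import { chat, instruction } from "./MistralInstructPromptTemplate.js";

// Instruction prompt with an emulated system prompt and a response prefix.
const instructionPrompt = instruction().format({
  system: "You are a helpful assistant.",
  instruction: "Write a short greeting.",
  responsePrefix: "Hello",
});
// "<s>[INST] You are a helpful assistant. [/INST] </s>[INST] Write a short greeting. [/INST] Hello"

// Chat prompts must alternate user/assistant messages and end with a user message.
const chatPrompt = chat().format({
  messages: [
    { role: "user", content: "What is the capital of France?" },
    { role: "assistant", content: "Paris." },
    { role: "user", content: "And of Italy?" },
  ],
});
// "<s>[INST] What is the capital of France? [/INST] Paris.</s>[INST] And of Italy? [/INST] "
```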
package/README.md CHANGED
@@ -16,12 +16,13 @@

  - **Vendor-neutral**: ModelFusion is a non-commercial open source project that is community-driven. You can use it with any supported provider.
  - **Multi-modal**: ModelFusion supports a wide range of models including text generation, image generation, vision, text-to-speech, speech-to-text, and embedding models.
- - **Type inference and validation**: ModelFusion infers TypeScript types wherever possible and to validates model responses.
+ - **Type inference and validation**: ModelFusion infers TypeScript types wherever possible and validates model responses.
  - **Observability and logging**: ModelFusion provides an observer framework and out-of-the-box logging support.
- - **Resilience and Robustness**: ModelFusion ensures seamless operation through automatic retries, throttling, and error handling mechanisms.
+ - **Resilience and robustness**: ModelFusion ensures seamless operation through automatic retries, throttling, and error handling mechanisms.
+ - **Built for production**: ModelFusion is fully tree-shakeable, can be used in serverless environments, and only uses a minimal set of dependencies.

  > [!NOTE]
- > ModelFusion is in its initial development phase. The main API is now mostly stable, but until version 1.0 there may be breaking changes. Feedback and suggestions are welcome.
+ > ModelFusion is getting closer to a stable v1, which is expected in Q1/2024. The main API is now mostly stable, but until version 1.0 there may be breaking changes. Feedback and suggestions are welcome.

  ## Quick Install

@@ -44,8 +45,8 @@ You can provide API keys for the different [integrations](https://modelfusion.de

  ### [Generate Text](https://modelfusion.dev/guide/function/generate-text)

- Generate text using a language model and a prompt. You can stream the text if it is supported by the model. You can use images for multi-modal prompting if the model supports it (e.g. with [llama.cpp](https://modelfusion.dev/guide/)).
- You can use [prompt templates](https://modelfusion.dev/guide/function/generate-text#prompt-format) to change the prompt template of a model.
+ Generate text using a language model and a prompt. You can stream the text if it is supported by the model. You can use images for multi-modal prompting if the model supports it (e.g. with [llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)).
+ You can use [prompt templates](https://modelfusion.dev/guide/function/generate-text#prompt-template) to change the prompt template of a model.

  #### generateText

@@ -58,7 +59,7 @@ const text = await generateText(
  );
  ```

- Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)
+ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)

  #### streamText

@@ -323,53 +324,11 @@ const reconstructedText = await tokenizer.detokenize(tokens);

  Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)

- ### [Guards](https://modelfusion.dev/guide/guard)
-
- Guard functions can be used to implement retry on error, redacting and changing reponses, etc.
-
- #### Retry structure parsing on error
-
- ```ts
- const result = await guard(
- (input, options) =>
- generateStructure(
- openai
- .ChatTextGenerator({
- // ...
- })
- .asFunctionCallStructureGenerationModel({
- fnName: "myFunction",
- }),
- zodSchema({
- // ...
- }),
- input,
- options
- ),
- [
- // ...
- ],
- fixStructure({
- modifyInputForRetry: async ({ input, error }) => [
- ...input,
- openai.ChatMessage.assistant(null, {
- functionCall: {
- name: "sentiment",
- arguments: JSON.stringify(error.valueText),
- },
- }),
- openai.ChatMessage.user(error.message),
- openai.ChatMessage.user("Please fix the error and try again."),
- ],
- })
- );
- ```
-
  ### [Tools](https://modelfusion.dev/guide/tools)

  Tools are functions that can be executed by an AI model. They are useful for building chatbots and agents.

- Predefined tools: [SerpAPI](https://modelfusion.dev/integration/tool/serpapi), [Google Custom Search](https://modelfusion.dev/integration/tool/google-custom-search)
+ Predefined tools: [Math.js](https://modelfusion.dev/guide/tools/predefined-tools/mathjs), [SerpAPI](https://modelfusion.dev/guide/tools/predefined-tools/serpapi), [Google Custom Search](https://modelfusion.dev/guide/tools/predefined-tools/google-custom-search)

  #### [Creating Tools](https://modelfusion.dev/guide/tools/create-tools)

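For context, the `generateText` call that the README excerpt above abbreviates looks roughly like the following sketch. The provider, model name, and prompt are placeholders; only the positional `generateText(model, prompt)` call shape is taken from the `const text = await generateText(` context line in the hunk above.

```ts
import { generateText, openai } from "modelfusion";

// Placeholder model and prompt, shown only to make the abbreviated README
// snippet above concrete.
const text = await generateText(
  openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
  "Write a haiku about prompt templates:\n\n"
);
```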
package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.cjs CHANGED
@@ -46,4 +46,15 @@ describe("chat prompt", () => {
  });
  expect(prompt).toMatchSnapshot();
  });
+ it("should format prompt with system message and user-assistant-user messages", () => {
+ const prompt = (0, ChatMLPromptTemplate_js_1.chat)().format({
+ system: "you are a chatbot",
+ messages: [
+ { role: "user", content: "1st user message" },
+ { role: "assistant", content: "assistant message" },
+ { role: "user", content: "2nd user message" },
+ ],
+ });
+ expect(prompt).toMatchSnapshot();
+ });
  });
package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.js CHANGED
@@ -44,4 +44,15 @@ describe("chat prompt", () => {
  });
  expect(prompt).toMatchSnapshot();
  });
+ it("should format prompt with system message and user-assistant-user messages", () => {
+ const prompt = chat().format({
+ system: "you are a chatbot",
+ messages: [
+ { role: "user", content: "1st user message" },
+ { role: "assistant", content: "assistant message" },
+ { role: "user", content: "2nd user message" },
+ ],
+ });
+ expect(prompt).toMatchSnapshot();
+ });
  });
package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs CHANGED
@@ -24,7 +24,7 @@ function text() {
  return {
  stopSequences: [END_SEGMENT],
  format(prompt) {
- return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt}${END_INSTRUCTION}\n`;
+ return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt}${END_INSTRUCTION}`;
  },
  };
  }
@@ -72,12 +72,14 @@ function chat() {
  return {
  format(prompt) {
  validateLlama2Prompt(prompt);
- let text = prompt.system != null
- ? // Separate section for system message to simplify implementation
- // (this is slightly different from the original instructions):
- `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${BEGIN_SYSTEM}${prompt.system}${END_SYSTEM}${END_INSTRUCTION}${END_SEGMENT}`
- : "";
- for (const { role, content } of prompt.messages) {
+ // get content of the first message (validated to be a user message)
+ const content = prompt.messages[0].content;
+ let text = `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system != null
+ ? `${BEGIN_SYSTEM}${prompt.system}${END_SYSTEM}`
+ : ""}${content}${END_INSTRUCTION}`;
+ // process remaining messages
+ for (let i = 1; i < prompt.messages.length; i++) {
+ const { role, content } = prompt.messages[i];
  switch (role) {
  case "user": {
  const textContent = (0, ContentPart_js_1.validateContentIsString)(content, prompt);
package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js CHANGED
@@ -21,7 +21,7 @@ export function text() {
  return {
  stopSequences: [END_SEGMENT],
  format(prompt) {
- return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt}${END_INSTRUCTION}\n`;
+ return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt}${END_INSTRUCTION}`;
  },
  };
  }
@@ -67,12 +67,14 @@ export function chat() {
  return {
  format(prompt) {
  validateLlama2Prompt(prompt);
- let text = prompt.system != null
- ? // Separate section for system message to simplify implementation
- // (this is slightly different from the original instructions):
- `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${BEGIN_SYSTEM}${prompt.system}${END_SYSTEM}${END_INSTRUCTION}${END_SEGMENT}`
- : "";
- for (const { role, content } of prompt.messages) {
+ // get content of the first message (validated to be a user message)
+ const content = prompt.messages[0].content;
+ let text = `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system != null
+ ? `${BEGIN_SYSTEM}${prompt.system}${END_SYSTEM}`
+ : ""}${content}${END_INSTRUCTION}`;
+ // process remaining messages
+ for (let i = 1; i < prompt.messages.length; i++) {
+ const { role, content } = prompt.messages[i];
  switch (role) {
  case "user": {
  const textContent = validateContentIsString(content, prompt);
package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.cjs CHANGED
@@ -46,4 +46,15 @@ describe("chat prompt", () => {
  });
  expect(prompt).toMatchSnapshot();
  });
+ it("should format prompt with system message and user-assistant-user messages", () => {
+ const prompt = (0, Llama2PromptTemplate_js_1.chat)().format({
+ system: "you are a chatbot",
+ messages: [
+ { role: "user", content: "1st user message" },
+ { role: "assistant", content: "assistant message" },
+ { role: "user", content: "2nd user message" },
+ ],
+ });
+ expect(prompt).toMatchSnapshot();
+ });
  });
package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.js CHANGED
@@ -44,4 +44,15 @@ describe("chat prompt", () => {
  });
  expect(prompt).toMatchSnapshot();
  });
+ it("should format prompt with system message and user-assistant-user messages", () => {
+ const prompt = chat().format({
+ system: "you are a chatbot",
+ messages: [
+ { role: "user", content: "1st user message" },
+ { role: "assistant", content: "assistant message" },
+ { role: "user", content: "2nd user message" },
+ ],
+ });
+ expect(prompt).toMatchSnapshot();
+ });
  });
package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.cjs ADDED
@@ -0,0 +1,150 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.validateMistralPrompt = exports.chat = exports.instruction = exports.text = void 0;
+ const ContentPart_js_1 = require("./ContentPart.cjs");
+ const InvalidPromptError_js_1 = require("./InvalidPromptError.cjs");
+ const BEGIN_SEGMENT = "<s>";
+ const END_SEGMENT = "</s>";
+ const BEGIN_INSTRUCTION = "[INST] ";
+ const END_INSTRUCTION = " [/INST] ";
+ /**
+ * Formats a text prompt as a Mistral instruct prompt.
+ *
+ * Mistral prompt template:
+ * ```
+ * <s>[INST] { instruction } [/INST]
+ * ```
+ *
+ * @see https://docs.mistral.ai/models/#chat-template
+ */
+ function text() {
+ return {
+ stopSequences: [END_SEGMENT],
+ format(prompt) {
+ return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt}${END_INSTRUCTION}`;
+ },
+ };
+ }
+ exports.text = text;
+ /**
+ * Formats an instruction prompt as a Mistral instruct prompt.
+ *
+ * Note that Mistral does not support system prompts. We emulate them.
+ *
+ * Mistral prompt template when system prompt is set:
+ * ```
+ * <s>[INST] ${ system prompt } [/INST] </s>[INST] ${instruction} [/INST] ${ response prefix }
+ * ```
+ *
+ * Mistral prompt template when there is no system prompt:
+ * ```
+ * <s>[INST] ${ instruction } [/INST] ${ response prefix }
+ * ```
+ *
+ * @see https://docs.mistral.ai/models/#chat-template
+ */
+ function instruction() {
+ return {
+ stopSequences: [END_SEGMENT],
+ format(prompt) {
+ const instruction = (0, ContentPart_js_1.validateContentIsString)(prompt.instruction, prompt);
+ if (prompt.system != null) {
+ return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system}${END_INSTRUCTION}${END_SEGMENT}${BEGIN_INSTRUCTION}${instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
+ }
+ return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
+ },
+ };
+ }
+ exports.instruction = instruction;
+ /**
+ * Formats a chat prompt as a Mistral instruct prompt.
+ *
+ * Note that Mistral does not support system prompts. We emulate them.
+ *
+ * Mistral prompt template when system prompt is set:
+ * ```
+ * <s>[INST] ${ system prompt } [/INST] </s> [INST] ${ user msg 1 } [/INST] ${ model response 1 } [INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+ * ```
+ *
+ * Mistral prompt template when there is no system prompt:
+ * ```
+ * <s>[INST] ${ user msg 1 } [/INST] ${ model response 1 } </s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+ * ```
+ *
+ * @see https://docs.mistral.ai/models/#chat-template
+ */
+ function chat() {
+ return {
+ format(prompt) {
+ validateMistralPrompt(prompt);
+ let text = "";
+ let i = 0;
+ // handle the special first segment
+ if (prompt.system != null) {
+ text += `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system}${END_INSTRUCTION}${END_SEGMENT}`;
+ }
+ else {
+ // get content of the first message (validated to be a user message)
+ text = `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.messages[0].content}${END_INSTRUCTION}`;
+ // process 2nd message (validated to be an assistant message)
+ if (prompt.messages.length > 1) {
+ text += `${prompt.messages[1].content}${END_SEGMENT}`;
+ }
+ i = 2;
+ }
+ // process remaining messages
+ for (; i < prompt.messages.length; i++) {
+ const { role, content } = prompt.messages[i];
+ switch (role) {
+ case "user": {
+ const textContent = (0, ContentPart_js_1.validateContentIsString)(content, prompt);
+ text += `${BEGIN_INSTRUCTION}${textContent}${END_INSTRUCTION}`;
+ break;
+ }
+ case "assistant": {
+ text += (0, ContentPart_js_1.validateContentIsString)(content, prompt);
+ break;
+ }
+ case "tool": {
+ throw new InvalidPromptError_js_1.InvalidPromptError("Tool messages are not supported.", prompt);
+ }
+ default: {
+ const _exhaustiveCheck = role;
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ }
+ }
+ }
+ return text;
+ },
+ stopSequences: [END_SEGMENT],
+ };
+ }
+ exports.chat = chat;
+ /**
+ * Checks if a Mistral chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
+ *
+ * - The first message of the chat must be a user message.
+ * - Then it must be alternating between an assistant message and a user message.
+ * - The last message must always be a user message (when submitting to a model).
+ *
+ * The type checking is done at runtime when you submit a chat prompt to a model with a prompt template.
+ *
+ * @throws {@link ChatPromptValidationError}
+ */
+ function validateMistralPrompt(chatPrompt) {
+ const messages = chatPrompt.messages;
+ if (messages.length < 1) {
+ throw new InvalidPromptError_js_1.InvalidPromptError("ChatPrompt should have at least one message.", chatPrompt);
+ }
+ for (let i = 0; i < messages.length; i++) {
+ const expectedRole = i % 2 === 0 ? "user" : "assistant";
+ const role = messages[i].role;
+ if (role !== expectedRole) {
+ throw new InvalidPromptError_js_1.InvalidPromptError(`Message at index ${i} should have role '${expectedRole}', but has role '${role}'.`, chatPrompt);
+ }
+ }
+ if (messages.length % 2 === 0) {
+ throw new InvalidPromptError_js_1.InvalidPromptError("The last message must be a user message.", chatPrompt);
+ }
+ }
+ exports.validateMistralPrompt = validateMistralPrompt;
package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.d.ts ADDED
@@ -0,0 +1,62 @@
+ import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
+ import { ChatPrompt } from "./ChatPrompt.js";
+ import { InstructionPrompt } from "./InstructionPrompt.js";
+ /**
+ * Formats a text prompt as a Mistral instruct prompt.
+ *
+ * Mistral prompt template:
+ * ```
+ * <s>[INST] { instruction } [/INST]
+ * ```
+ *
+ * @see https://docs.mistral.ai/models/#chat-template
+ */
+ export declare function text(): TextGenerationPromptTemplate<string, string>;
+ /**
+ * Formats an instruction prompt as a Mistral instruct prompt.
+ *
+ * Note that Mistral does not support system prompts. We emulate them.
+ *
+ * Mistral prompt template when system prompt is set:
+ * ```
+ * <s>[INST] ${ system prompt } [/INST] </s>[INST] ${instruction} [/INST] ${ response prefix }
+ * ```
+ *
+ * Mistral prompt template when there is no system prompt:
+ * ```
+ * <s>[INST] ${ instruction } [/INST] ${ response prefix }
+ * ```
+ *
+ * @see https://docs.mistral.ai/models/#chat-template
+ */
+ export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, string>;
+ /**
+ * Formats a chat prompt as a Mistral instruct prompt.
+ *
+ * Note that Mistral does not support system prompts. We emulate them.
+ *
+ * Mistral prompt template when system prompt is set:
+ * ```
+ * <s>[INST] ${ system prompt } [/INST] </s> [INST] ${ user msg 1 } [/INST] ${ model response 1 } [INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+ * ```
+ *
+ * Mistral prompt template when there is no system prompt:
+ * ```
+ * <s>[INST] ${ user msg 1 } [/INST] ${ model response 1 } </s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+ * ```
+ *
+ * @see https://docs.mistral.ai/models/#chat-template
+ */
+ export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, string>;
+ /**
+ * Checks if a Mistral chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
+ *
+ * - The first message of the chat must be a user message.
+ * - Then it must be alternating between an assistant message and a user message.
+ * - The last message must always be a user message (when submitting to a model).
+ *
+ * The type checking is done at runtime when you submit a chat prompt to a model with a prompt template.
+ *
+ * @throws {@link ChatPromptValidationError}
+ */
+ export declare function validateMistralPrompt(chatPrompt: ChatPrompt): void;
package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.js ADDED
@@ -0,0 +1,143 @@
+ import { validateContentIsString } from "./ContentPart.js";
+ import { InvalidPromptError } from "./InvalidPromptError.js";
+ const BEGIN_SEGMENT = "<s>";
+ const END_SEGMENT = "</s>";
+ const BEGIN_INSTRUCTION = "[INST] ";
+ const END_INSTRUCTION = " [/INST] ";
+ /**
+ * Formats a text prompt as a Mistral instruct prompt.
+ *
+ * Mistral prompt template:
+ * ```
+ * <s>[INST] { instruction } [/INST]
+ * ```
+ *
+ * @see https://docs.mistral.ai/models/#chat-template
+ */
+ export function text() {
+ return {
+ stopSequences: [END_SEGMENT],
+ format(prompt) {
+ return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt}${END_INSTRUCTION}`;
+ },
+ };
+ }
+ /**
+ * Formats an instruction prompt as a Mistral instruct prompt.
+ *
+ * Note that Mistral does not support system prompts. We emulate them.
+ *
+ * Mistral prompt template when system prompt is set:
+ * ```
+ * <s>[INST] ${ system prompt } [/INST] </s>[INST] ${instruction} [/INST] ${ response prefix }
+ * ```
+ *
+ * Mistral prompt template when there is no system prompt:
+ * ```
+ * <s>[INST] ${ instruction } [/INST] ${ response prefix }
+ * ```
+ *
+ * @see https://docs.mistral.ai/models/#chat-template
+ */
+ export function instruction() {
+ return {
+ stopSequences: [END_SEGMENT],
+ format(prompt) {
+ const instruction = validateContentIsString(prompt.instruction, prompt);
+ if (prompt.system != null) {
+ return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system}${END_INSTRUCTION}${END_SEGMENT}${BEGIN_INSTRUCTION}${instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
+ }
+ return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
+ },
+ };
+ }
+ /**
+ * Formats a chat prompt as a Mistral instruct prompt.
+ *
+ * Note that Mistral does not support system prompts. We emulate them.
+ *
+ * Mistral prompt template when system prompt is set:
+ * ```
+ * <s>[INST] ${ system prompt } [/INST] </s> [INST] ${ user msg 1 } [/INST] ${ model response 1 } [INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+ * ```
+ *
+ * Mistral prompt template when there is no system prompt:
+ * ```
+ * <s>[INST] ${ user msg 1 } [/INST] ${ model response 1 } </s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+ * ```
+ *
+ * @see https://docs.mistral.ai/models/#chat-template
+ */
+ export function chat() {
+ return {
+ format(prompt) {
+ validateMistralPrompt(prompt);
+ let text = "";
+ let i = 0;
+ // handle the special first segment
+ if (prompt.system != null) {
+ text += `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system}${END_INSTRUCTION}${END_SEGMENT}`;
+ }
+ else {
+ // get content of the first message (validated to be a user message)
+ text = `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.messages[0].content}${END_INSTRUCTION}`;
+ // process 2nd message (validated to be an assistant message)
+ if (prompt.messages.length > 1) {
+ text += `${prompt.messages[1].content}${END_SEGMENT}`;
+ }
+ i = 2;
+ }
+ // process remaining messages
+ for (; i < prompt.messages.length; i++) {
+ const { role, content } = prompt.messages[i];
+ switch (role) {
+ case "user": {
+ const textContent = validateContentIsString(content, prompt);
+ text += `${BEGIN_INSTRUCTION}${textContent}${END_INSTRUCTION}`;
+ break;
+ }
+ case "assistant": {
+ text += validateContentIsString(content, prompt);
+ break;
+ }
+ case "tool": {
+ throw new InvalidPromptError("Tool messages are not supported.", prompt);
+ }
+ default: {
+ const _exhaustiveCheck = role;
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+ }
+ }
+ }
+ return text;
+ },
+ stopSequences: [END_SEGMENT],
+ };
+ }
+ /**
+ * Checks if a Mistral chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
+ *
+ * - The first message of the chat must be a user message.
+ * - Then it must be alternating between an assistant message and a user message.
+ * - The last message must always be a user message (when submitting to a model).
+ *
+ * The type checking is done at runtime when you submit a chat prompt to a model with a prompt template.
+ *
+ * @throws {@link ChatPromptValidationError}
+ */
+ export function validateMistralPrompt(chatPrompt) {
+ const messages = chatPrompt.messages;
+ if (messages.length < 1) {
+ throw new InvalidPromptError("ChatPrompt should have at least one message.", chatPrompt);
+ }
+ for (let i = 0; i < messages.length; i++) {
+ const expectedRole = i % 2 === 0 ? "user" : "assistant";
+ const role = messages[i].role;
+ if (role !== expectedRole) {
+ throw new InvalidPromptError(`Message at index ${i} should have role '${expectedRole}', but has role '${role}'.`, chatPrompt);
+ }
+ }
+ if (messages.length % 2 === 0) {
+ throw new InvalidPromptError("The last message must be a user message.", chatPrompt);
+ }
+ }
package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.cjs ADDED
@@ -0,0 +1,60 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const MistralInstructPromptTemplate_js_1 = require("./MistralInstructPromptTemplate.cjs");
+ describe("text prompt", () => {
+ it("should format prompt", () => {
+ const prompt = (0, MistralInstructPromptTemplate_js_1.text)().format("prompt");
+ expect(prompt).toMatchSnapshot();
+ });
+ });
+ describe("instruction prompt", () => {
+ it("should format prompt with instruction", () => {
+ const prompt = (0, MistralInstructPromptTemplate_js_1.instruction)().format({
+ instruction: "instruction",
+ });
+ expect(prompt).toMatchSnapshot();
+ });
+ it("should format prompt with system and instruction", () => {
+ const prompt = (0, MistralInstructPromptTemplate_js_1.instruction)().format({
+ system: "system",
+ instruction: "instruction",
+ });
+ expect(prompt).toMatchSnapshot();
+ });
+ it("should format prompt with instruction and response prefix", () => {
+ const prompt = (0, MistralInstructPromptTemplate_js_1.instruction)().format({
+ instruction: "instruction",
+ responsePrefix: "response prefix",
+ });
+ expect(prompt).toMatchSnapshot();
+ });
+ });
+ describe("chat prompt", () => {
+ it("should format prompt with user message", () => {
+ const prompt = (0, MistralInstructPromptTemplate_js_1.chat)().format({
+ messages: [{ role: "user", content: "user message" }],
+ });
+ expect(prompt).toMatchSnapshot();
+ });
+ it("should format prompt with user-assistant-user messages", () => {
+ const prompt = (0, MistralInstructPromptTemplate_js_1.chat)().format({
+ messages: [
+ { role: "user", content: "1st user message" },
+ { role: "assistant", content: "assistant message" },
+ { role: "user", content: "2nd user message" },
+ ],
+ });
+ expect(prompt).toMatchSnapshot();
+ });
+ it("should format prompt with system message and user-assistant-user messages", () => {
+ const prompt = (0, MistralInstructPromptTemplate_js_1.chat)().format({
+ system: "you are a chatbot",
+ messages: [
+ { role: "user", content: "1st user message" },
+ { role: "assistant", content: "assistant message" },
+ { role: "user", content: "2nd user message" },
+ ],
+ });
+ expect(prompt).toMatchSnapshot();
+ });
+ });
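As the tests above suggest, the chat template validates message order at runtime via `validateMistralPrompt`. A small sketch of what it rejects, again using a hypothetical relative import:

```ts
// Hypothetical import path, as in the earlier sketch.
import { chat } from "./MistralInstructPromptTemplate.js";

// This prompt ends with an assistant message, so the message count is even and
// validateMistralPrompt throws an InvalidPromptError
// ("The last message must be a user message.").
try {
  chat().format({
    messages: [
      { role: "user", content: "Hi" },
      { role: "assistant", content: "Hello! How can I help?" },
    ],
  });
} catch (error) {
  console.error(error);
}
```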