modelfusion 0.106.0 → 0.108.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +59 -0
- package/README.md +19 -59
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.cjs +11 -0
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.js +11 -0
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +9 -7
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +9 -7
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.cjs +11 -0
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.js +11 -0
- package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.cjs +150 -0
- package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.d.ts +62 -0
- package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.js +143 -0
- package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.cjs +60 -0
- package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.js +58 -0
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.cjs +11 -0
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.js +11 -0
- package/model-function/generate-text/prompt-template/TextPromptTemplate.test.cjs +11 -0
- package/model-function/generate-text/prompt-template/TextPromptTemplate.test.js +11 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +11 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +11 -0
- package/model-function/generate-text/prompt-template/index.cjs +2 -1
- package/model-function/generate-text/prompt-template/index.d.ts +1 -0
- package/model-function/generate-text/prompt-template/index.js +1 -0
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +3 -3
- package/model-provider/llamacpp/{LlamaCppTextGenerationModel.cjs → LlamaCppCompletionModel.cjs} +25 -11
- package/model-provider/llamacpp/{LlamaCppTextGenerationModel.d.ts → LlamaCppCompletionModel.d.ts} +125 -38
- package/model-provider/llamacpp/{LlamaCppTextGenerationModel.js → LlamaCppCompletionModel.js} +23 -9
- package/model-provider/llamacpp/{LlamaCppTextGenerationModel.test.cjs → LlamaCppCompletionModel.test.cjs} +3 -3
- package/model-provider/llamacpp/LlamaCppCompletionModel.test.d.ts +1 -0
- package/model-provider/llamacpp/{LlamaCppTextGenerationModel.test.js → LlamaCppCompletionModel.test.js} +3 -3
- package/model-provider/llamacpp/LlamaCppFacade.cjs +2 -2
- package/model-provider/llamacpp/LlamaCppFacade.d.ts +2 -2
- package/model-provider/llamacpp/LlamaCppFacade.js +2 -2
- package/model-provider/llamacpp/index.cjs +1 -1
- package/model-provider/llamacpp/index.d.ts +1 -1
- package/model-provider/llamacpp/index.js +1 -1
- package/model-provider/mistral/MistralChatModel.cjs +4 -4
- package/model-provider/mistral/MistralChatModel.d.ts +6 -6
- package/model-provider/mistral/MistralChatModel.js +1 -1
- package/model-provider/mistral/index.cjs +3 -3
- package/model-provider/mistral/index.d.ts +2 -2
- package/model-provider/mistral/index.js +2 -2
- package/model-provider/openai/AbstractOpenAIChatModel.cjs +2 -10
- package/model-provider/openai/AbstractOpenAIChatModel.d.ts +13 -195
- package/model-provider/openai/AbstractOpenAIChatModel.js +2 -10
- package/model-provider/openai/AbstractOpenAICompletionModel.cjs +167 -0
- package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +199 -0
- package/model-provider/openai/AbstractOpenAICompletionModel.js +163 -0
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -3
- package/model-provider/openai/OpenAIChatModel.d.ts +3 -6
- package/model-provider/openai/OpenAICompletionModel.cjs +4 -156
- package/model-provider/openai/OpenAICompletionModel.d.ts +4 -191
- package/model-provider/openai/OpenAICompletionModel.js +3 -155
- package/model-provider/openai/index.cjs +1 -0
- package/model-provider/openai/index.d.ts +1 -0
- package/model-provider/openai/index.js +1 -0
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +4 -5
- package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs +74 -0
- package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts +27 -0
- package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js +70 -0
- package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +37 -6
- package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +33 -5
- package/model-provider/openai-compatible/OpenAICompatibleFacade.js +35 -5
- package/model-provider/openai-compatible/OpenAICompatibleProviderName.cjs +2 -0
- package/model-provider/openai-compatible/OpenAICompatibleProviderName.d.ts +1 -0
- package/model-provider/openai-compatible/OpenAICompatibleProviderName.js +1 -0
- package/model-provider/openai-compatible/TogetherAIApiConfiguration.cjs +29 -0
- package/model-provider/openai-compatible/TogetherAIApiConfiguration.d.ts +18 -0
- package/model-provider/openai-compatible/TogetherAIApiConfiguration.js +25 -0
- package/model-provider/openai-compatible/index.cjs +4 -1
- package/model-provider/openai-compatible/index.d.ts +4 -1
- package/model-provider/openai-compatible/index.js +4 -1
- package/package.json +16 -16
- package/tool/generate-tool-call/index.cjs +1 -0
- package/tool/generate-tool-call/index.d.ts +1 -0
- package/tool/generate-tool-call/index.js +1 -0
- package/tool/generate-tool-call/jsonToolCallPrompt.cjs +30 -0
- package/tool/generate-tool-call/jsonToolCallPrompt.d.ts +5 -0
- package/tool/generate-tool-call/jsonToolCallPrompt.js +27 -0
- /package/{model-provider/llamacpp/LlamaCppTextGenerationModel.test.d.ts → model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.d.ts} +0 -0
- /package/model-provider/mistral/{MistralPromptTemplate.cjs → MistralChatPromptTemplate.cjs} +0 -0
- /package/model-provider/mistral/{MistralPromptTemplate.d.ts → MistralChatPromptTemplate.d.ts} +0 -0
- /package/model-provider/mistral/{MistralPromptTemplate.js → MistralChatPromptTemplate.js} +0 -0
package/CHANGELOG.md
CHANGED
````diff
@@ -1,5 +1,64 @@
 # Changelog
 
+## v0.108.0 - 2023-12-30
+
+### Added
+
+- [Open AI compatible completion model](https://modelfusion.dev/integration/model-provider/openaicompatible/). It e.g. works with Fireworks AI.
+- Together AI API configuration (for Open AI compatible chat models):
+
+  ```ts
+  import {
+    TogetherAIApiConfiguration,
+    openaicompatible,
+    streamText,
+  } from "modelfusion";
+
+  const textStream = await streamText(
+    openaicompatible
+      .ChatTextGenerator({
+        api: new TogetherAIApiConfiguration(),
+        model: "mistralai/Mixtral-8x7B-Instruct-v0.1",
+      })
+      .withTextPrompt(),
+
+    "Write a story about a robot learning to love"
+  );
+  ```
+
+- Updated Llama.cpp model settings. GBNF grammars can be passed into the `grammar` setting:
+
+  ```ts
+  const text = await generateText(
+    llamacpp
+      .TextGenerator({
+        maxGenerationTokens: 512,
+        temperature: 0,
+        // simple list grammar:
+        grammar: `root ::= ("- " item)+
+  item ::= [^\\n]+ "\\n"`,
+      })
+      .withTextPromptTemplate(MistralInstructPrompt.text()),
+
+    "List 5 ingredients for a lasagna:\n\n"
+  );
+  ```
+
+## v0.107.0 - 2023-12-29
+
+### Added
+
+- Mistral instruct prompt template
+
+### Changed
+
+- **breaking change**: Renamed `LlamaCppTextGenerationModel` to `LlamaCppCompletionModel`.
+
+### Fixed
+
+- Updated `LlamaCppCompletionModel` to the latest llama.cpp version.
+- Fixed formatting of system prompt for chats in Llama2 prompt template.
+
 ## v0.106.0 - 2023-12-28
 
 Experimental features that are unlikely to become stable before v1.0 have been moved to a separate `modelfusion-experimental` package.
````
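The new completion model pairs with the `OpenAICompatibleCompletionModel` and facade files added in this diff. A minimal sketch of how it might be wired up for Fireworks AI; the `CompletionTextGenerator` facade method follows from the facade changes, but the `BaseUrlApiConfiguration` usage, the base URL, and the auth header are assumptions for illustration, not taken from this diff:

```ts
import {
  BaseUrlApiConfiguration,
  generateText,
  openaicompatible,
} from "modelfusion";

// Hypothetical wiring for an OpenAI-compatible /completions endpoint.
const text = await generateText(
  openaicompatible.CompletionTextGenerator({
    api: new BaseUrlApiConfiguration({
      baseUrl: "https://api.fireworks.ai/inference/v1",
      headers: { Authorization: `Bearer ${process.env.FIREWORKS_API_KEY}` },
    }),
    model: "accounts/fireworks/models/mistral-7b",
  }),
  "Write a short poem about coding:"
);
```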
package/README.md
CHANGED
````diff
@@ -16,12 +16,13 @@
 - **Vendor-neutral**: ModelFusion is a non-commercial open source project that is community-driven. You can use it with any supported provider.
 - **Multi-modal**: ModelFusion supports a wide range of models including text generation, image generation, vision, text-to-speech, speech-to-text, and embedding models.
-- **Type inference and validation**: ModelFusion infers TypeScript types wherever possible and
+- **Type inference and validation**: ModelFusion infers TypeScript types wherever possible and validates model responses.
 - **Observability and logging**: ModelFusion provides an observer framework and out-of-the-box logging support.
-- **Resilience and
+- **Resilience and robustness**: ModelFusion ensures seamless operation through automatic retries, throttling, and error handling mechanisms.
+- **Built for production**: ModelFusion is fully tree-shakeable, can be used in serverless environments, and only uses a minimal set of dependencies.
 
 > [!NOTE]
-> ModelFusion is
+> ModelFusion is getting closer to a stable v1, which is expected in Q1/2024. The main API is now mostly stable, but until version 1.0 there may be breaking changes. Feedback and suggestions are welcome.
 
 ## Quick Install
 
@@ -44,8 +45,8 @@ You can provide API keys for the different [integrations](https://modelfusion.de
 
 ### [Generate Text](https://modelfusion.dev/guide/function/generate-text)
 
-Generate text using a language model and a prompt. You can stream the text if it is supported by the model. You can use images for multi-modal prompting if the model supports it (e.g. with [llama.cpp](https://modelfusion.dev/
-You can use [prompt templates](https://modelfusion.dev/guide/function/generate-text#prompt-
+Generate text using a language model and a prompt. You can stream the text if it is supported by the model. You can use images for multi-modal prompting if the model supports it (e.g. with [llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)).
+You can use [prompt templates](https://modelfusion.dev/guide/function/generate-text#prompt-template) to change the prompt template of a model.
 
 #### generateText
 
@@ -58,7 +59,7 @@ const text = await generateText(
 );
 ```
 
-Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)
 
 #### streamText
 
@@ -323,53 +324,11 @@ const reconstructedText = await tokenizer.detokenize(tokens);
 
 Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
 
-### [Guards](https://modelfusion.dev/guide/guard)
-
-Guard functions can be used to implement retry on error, redacting and changing reponses, etc.
-
-#### Retry structure parsing on error
-
-```ts
-const result = await guard(
-  (input, options) =>
-    generateStructure(
-      openai
-        .ChatTextGenerator({
-          // ...
-        })
-        .asFunctionCallStructureGenerationModel({
-          fnName: "myFunction",
-        }),
-      zodSchema({
-        // ...
-      }),
-      input,
-      options
-    ),
-  [
-    // ...
-  ],
-  fixStructure({
-    modifyInputForRetry: async ({ input, error }) => [
-      ...input,
-      openai.ChatMessage.assistant(null, {
-        functionCall: {
-          name: "sentiment",
-          arguments: JSON.stringify(error.valueText),
-        },
-      }),
-      openai.ChatMessage.user(error.message),
-      openai.ChatMessage.user("Please fix the error and try again."),
-    ],
-  })
-);
-```
-
 ### [Tools](https://modelfusion.dev/guide/tools)
 
 Tools are functions that can be executed by an AI model. They are useful for building chatbots and agents.
 
-Predefined tools: [SerpAPI](https://modelfusion.dev/
+Predefined tools: [Math.js](https://modelfusion.dev/guide/tools/predefined-tools/mathjs), [SerpAPI](https://modelfusion.dev/guide/tools/predefined-tools/serpapi), [Google Custom Search](https://modelfusion.dev/guide/tools/predefined-tools/google-custom-search)
 
 #### [Creating Tools](https://modelfusion.dev/guide/tools/create-tools)
 
@@ -579,16 +538,17 @@ const textStream = await streamText(
 );
 ```
 
-| Prompt Template
-|
-| OpenAI Chat
-| Anthropic
-| Llama 2
-| ChatML
-| NeuralChat
-|
-|
-|
+| Prompt Template | Text Prompt | Instruction Prompt | Chat Prompt |
+| ---------------- | ----------- | ------------------ | ----------- |
+| OpenAI Chat | ✅ | ✅ | ✅ |
+| Anthropic | ✅ | ✅ | ✅ |
+| Llama 2 | ✅ | ✅ | ✅ |
+| ChatML | ✅ | ✅ | ✅ |
+| NeuralChat | ✅ | ✅ | ✅ |
+| Mistral Instruct | ✅ | ✅ | ✅ |
+| Alpaca | ✅ | ✅ | ❌ |
+| Vicuna | ❌ | ❌ | ✅ |
+| Generic Text | ✅ | ✅ | ✅ |
 
 ### [Image Generation Prompt Templates](https://modelfusion.dev/guide/function/generate-image/prompt-format)
````
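The prompt template table rows correspond to template namespaces that are applied to a model via `withTextPromptTemplate`. A minimal sketch mirroring the changelog example above (`llamacpp.TextGenerator` and `MistralInstructPrompt.text()` are both confirmed by this diff; the generation settings are illustrative):

```ts
import { MistralInstructPrompt, generateText, llamacpp } from "modelfusion";

// Wrap plain string prompts in the Mistral instruct format
// ("<s>[INST] ... [/INST] ") before they reach the llama.cpp model.
const model = llamacpp
  .TextGenerator({ maxGenerationTokens: 256 })
  .withTextPromptTemplate(MistralInstructPrompt.text());

const text = await generateText(model, "Explain GBNF grammars in one sentence.");
```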
package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.cjs
CHANGED
````diff
@@ -46,4 +46,15 @@ describe("chat prompt", () => {
         });
         expect(prompt).toMatchSnapshot();
     });
+    it("should format prompt with system message and user-assistant-user messages", () => {
+        const prompt = (0, ChatMLPromptTemplate_js_1.chat)().format({
+            system: "you are a chatbot",
+            messages: [
+                { role: "user", content: "1st user message" },
+                { role: "assistant", content: "assistant message" },
+                { role: "user", content: "2nd user message" },
+            ],
+        });
+        expect(prompt).toMatchSnapshot();
+    });
 });
````
package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.js
CHANGED
````diff
@@ -44,4 +44,15 @@ describe("chat prompt", () => {
         });
         expect(prompt).toMatchSnapshot();
     });
+    it("should format prompt with system message and user-assistant-user messages", () => {
+        const prompt = chat().format({
+            system: "you are a chatbot",
+            messages: [
+                { role: "user", content: "1st user message" },
+                { role: "assistant", content: "assistant message" },
+                { role: "user", content: "2nd user message" },
+            ],
+        });
+        expect(prompt).toMatchSnapshot();
+    });
 });
````
package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs
CHANGED
````diff
@@ -24,7 +24,7 @@ function text() {
     return {
         stopSequences: [END_SEGMENT],
         format(prompt) {
-            return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt}${END_INSTRUCTION}
+            return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt}${END_INSTRUCTION}`;
         },
     };
 }
@@ -72,12 +72,14 @@ function chat() {
     return {
         format(prompt) {
             validateLlama2Prompt(prompt);
-
-
-
-
-                : ""
-
+            // get content of the first message (validated to be a user message)
+            const content = prompt.messages[0].content;
+            let text = `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system != null
+                ? `${BEGIN_SYSTEM}${prompt.system}${END_SYSTEM}`
+                : ""}${content}${END_INSTRUCTION}`;
+            // process remaining messages
+            for (let i = 1; i < prompt.messages.length; i++) {
+                const { role, content } = prompt.messages[i];
                 switch (role) {
                     case "user": {
                         const textContent = (0, ContentPart_js_1.validateContentIsString)(content, prompt);
````
package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js
CHANGED
````diff
@@ -21,7 +21,7 @@ export function text() {
     return {
         stopSequences: [END_SEGMENT],
         format(prompt) {
-            return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt}${END_INSTRUCTION}
+            return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt}${END_INSTRUCTION}`;
         },
     };
 }
@@ -67,12 +67,14 @@ export function chat() {
     return {
         format(prompt) {
             validateLlama2Prompt(prompt);
-
-
-
-
-                : ""
-
+            // get content of the first message (validated to be a user message)
+            const content = prompt.messages[0].content;
+            let text = `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system != null
+                ? `${BEGIN_SYSTEM}${prompt.system}${END_SYSTEM}`
+                : ""}${content}${END_INSTRUCTION}`;
+            // process remaining messages
+            for (let i = 1; i < prompt.messages.length; i++) {
+                const { role, content } = prompt.messages[i];
                 switch (role) {
                     case "user": {
                         const textContent = validateContentIsString(content, prompt);
````
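The `chat()` fix above nests the system prompt inside the first instruction segment instead of emitting it as a separate segment. A rough sketch of the corrected output, assuming the namespace is exported as `Llama2Prompt` (analogous to `MistralInstructPrompt`) and that `BEGIN_SYSTEM`/`END_SYSTEM` are the standard Llama 2 tags `<<SYS>>\n` and `\n<</SYS>>\n\n`, neither of which is visible in this diff:

```ts
import { Llama2Prompt } from "modelfusion";

const prompt = Llama2Prompt.chat().format({
  system: "you are a chatbot",
  messages: [{ role: "user", content: "1st user message" }],
});
// Approximately (exact tag constants are not shown in this diff):
// "<s>[INST] <<SYS>>\nyou are a chatbot\n<</SYS>>\n\n1st user message [/INST]"
```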
package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.cjs
CHANGED
````diff
@@ -46,4 +46,15 @@ describe("chat prompt", () => {
         });
         expect(prompt).toMatchSnapshot();
     });
+    it("should format prompt with system message and user-assistant-user messages", () => {
+        const prompt = (0, Llama2PromptTemplate_js_1.chat)().format({
+            system: "you are a chatbot",
+            messages: [
+                { role: "user", content: "1st user message" },
+                { role: "assistant", content: "assistant message" },
+                { role: "user", content: "2nd user message" },
+            ],
+        });
+        expect(prompt).toMatchSnapshot();
+    });
 });
````
package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.js
CHANGED
````diff
@@ -44,4 +44,15 @@ describe("chat prompt", () => {
         });
         expect(prompt).toMatchSnapshot();
     });
+    it("should format prompt with system message and user-assistant-user messages", () => {
+        const prompt = chat().format({
+            system: "you are a chatbot",
+            messages: [
+                { role: "user", content: "1st user message" },
+                { role: "assistant", content: "assistant message" },
+                { role: "user", content: "2nd user message" },
+            ],
+        });
+        expect(prompt).toMatchSnapshot();
+    });
 });
````
package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.cjs
ADDED (+150 lines)
```js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.validateMistralPrompt = exports.chat = exports.instruction = exports.text = void 0;
const ContentPart_js_1 = require("./ContentPart.cjs");
const InvalidPromptError_js_1 = require("./InvalidPromptError.cjs");
const BEGIN_SEGMENT = "<s>";
const END_SEGMENT = "</s>";
const BEGIN_INSTRUCTION = "[INST] ";
const END_INSTRUCTION = " [/INST] ";
/**
 * Formats a text prompt as a Mistral instruct prompt.
 *
 * Mistral prompt template:
 * ```
 * <s>[INST] { instruction } [/INST]
 * ```
 *
 * @see https://docs.mistral.ai/models/#chat-template
 */
function text() {
    return {
        stopSequences: [END_SEGMENT],
        format(prompt) {
            return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt}${END_INSTRUCTION}`;
        },
    };
}
exports.text = text;
/**
 * Formats an instruction prompt as a Mistral instruct prompt.
 *
 * Note that Mistral does not support system prompts. We emulate them.
 *
 * Mistral prompt template when system prompt is set:
 * ```
 * <s>[INST] ${ system prompt } [/INST] </s>[INST] ${instruction} [/INST] ${ response prefix }
 * ```
 *
 * Mistral prompt template when there is no system prompt:
 * ```
 * <s>[INST] ${ instruction } [/INST] ${ response prefix }
 * ```
 *
 * @see https://docs.mistral.ai/models/#chat-template
 */
function instruction() {
    return {
        stopSequences: [END_SEGMENT],
        format(prompt) {
            const instruction = (0, ContentPart_js_1.validateContentIsString)(prompt.instruction, prompt);
            if (prompt.system != null) {
                return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system}${END_INSTRUCTION}${END_SEGMENT}${BEGIN_INSTRUCTION}${instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
            }
            return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
        },
    };
}
exports.instruction = instruction;
/**
 * Formats a chat prompt as a Mistral instruct prompt.
 *
 * Note that Mistral does not support system prompts. We emulate them.
 *
 * Mistral prompt template when system prompt is set:
 * ```
 * <s>[INST] ${ system prompt } [/INST] </s> [INST] ${ user msg 1 } [/INST] ${ model response 1 } [INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
 * ```
 *
 * Mistral prompt template when there is no system prompt:
 * ```
 * <s>[INST] ${ user msg 1 } [/INST] ${ model response 1 } </s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
 * ```
 *
 * @see https://docs.mistral.ai/models/#chat-template
 */
function chat() {
    return {
        format(prompt) {
            validateMistralPrompt(prompt);
            let text = "";
            let i = 0;
            // handle the special first segment
            if (prompt.system != null) {
                text += `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system}${END_INSTRUCTION}${END_SEGMENT}`;
            }
            else {
                // get content of the first message (validated to be a user message)
                text = `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.messages[0].content}${END_INSTRUCTION}`;
                // process 2nd message (validated to be an assistant message)
                if (prompt.messages.length > 1) {
                    text += `${prompt.messages[1].content}${END_SEGMENT}`;
                }
                i = 2;
            }
            // process remaining messages
            for (; i < prompt.messages.length; i++) {
                const { role, content } = prompt.messages[i];
                switch (role) {
                    case "user": {
                        const textContent = (0, ContentPart_js_1.validateContentIsString)(content, prompt);
                        text += `${BEGIN_INSTRUCTION}${textContent}${END_INSTRUCTION}`;
                        break;
                    }
                    case "assistant": {
                        text += (0, ContentPart_js_1.validateContentIsString)(content, prompt);
                        break;
                    }
                    case "tool": {
                        throw new InvalidPromptError_js_1.InvalidPromptError("Tool messages are not supported.", prompt);
                    }
                    default: {
                        const _exhaustiveCheck = role;
                        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
                    }
                }
            }
            return text;
        },
        stopSequences: [END_SEGMENT],
    };
}
exports.chat = chat;
/**
 * Checks if a Mistral chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
 *
 * - The first message of the chat must be a user message.
 * - Then it must be alternating between an assistant message and a user message.
 * - The last message must always be a user message (when submitting to a model).
 *
 * The type checking is done at runtime when you submit a chat prompt to a model with a prompt template.
 *
 * @throws {@link ChatPromptValidationError}
 */
function validateMistralPrompt(chatPrompt) {
    const messages = chatPrompt.messages;
    if (messages.length < 1) {
        throw new InvalidPromptError_js_1.InvalidPromptError("ChatPrompt should have at least one message.", chatPrompt);
    }
    for (let i = 0; i < messages.length; i++) {
        const expectedRole = i % 2 === 0 ? "user" : "assistant";
        const role = messages[i].role;
        if (role !== expectedRole) {
            throw new InvalidPromptError_js_1.InvalidPromptError(`Message at index ${i} should have role '${expectedRole}', but has role '${role}'.`, chatPrompt);
        }
    }
    if (messages.length % 2 === 0) {
        throw new InvalidPromptError_js_1.InvalidPromptError("The last message must be a user message.", chatPrompt);
    }
}
exports.validateMistralPrompt = validateMistralPrompt;
```
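Tracing `chat().format()` through the code above for a three-message prompt without a system message yields the segmented format from the doc comment. `MistralInstructPrompt` is the namespace export used in the changelog example; the expected output shown is derived directly from the template constants:

```ts
import { MistralInstructPrompt } from "modelfusion";

const prompt = MistralInstructPrompt.chat().format({
  messages: [
    { role: "user", content: "Hi" },
    { role: "assistant", content: "Hello!" },
    { role: "user", content: "What is 2+2?" },
  ],
});
// => "<s>[INST] Hi [/INST] Hello!</s>[INST] What is 2+2? [/INST] "
```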
package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.d.ts
ADDED (+62 lines)
```ts
import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
import { ChatPrompt } from "./ChatPrompt.js";
import { InstructionPrompt } from "./InstructionPrompt.js";
/**
 * Formats a text prompt as a Mistral instruct prompt.
 *
 * Mistral prompt template:
 * ```
 * <s>[INST] { instruction } [/INST]
 * ```
 *
 * @see https://docs.mistral.ai/models/#chat-template
 */
export declare function text(): TextGenerationPromptTemplate<string, string>;
/**
 * Formats an instruction prompt as a Mistral instruct prompt.
 *
 * Note that Mistral does not support system prompts. We emulate them.
 *
 * Mistral prompt template when system prompt is set:
 * ```
 * <s>[INST] ${ system prompt } [/INST] </s>[INST] ${instruction} [/INST] ${ response prefix }
 * ```
 *
 * Mistral prompt template when there is no system prompt:
 * ```
 * <s>[INST] ${ instruction } [/INST] ${ response prefix }
 * ```
 *
 * @see https://docs.mistral.ai/models/#chat-template
 */
export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, string>;
/**
 * Formats a chat prompt as a Mistral instruct prompt.
 *
 * Note that Mistral does not support system prompts. We emulate them.
 *
 * Mistral prompt template when system prompt is set:
 * ```
 * <s>[INST] ${ system prompt } [/INST] </s> [INST] ${ user msg 1 } [/INST] ${ model response 1 } [INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
 * ```
 *
 * Mistral prompt template when there is no system prompt:
 * ```
 * <s>[INST] ${ user msg 1 } [/INST] ${ model response 1 } </s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
 * ```
 *
 * @see https://docs.mistral.ai/models/#chat-template
 */
export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, string>;
/**
 * Checks if a Mistral chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
 *
 * - The first message of the chat must be a user message.
 * - Then it must be alternating between an assistant message and a user message.
 * - The last message must always be a user message (when submitting to a model).
 *
 * The type checking is done at runtime when you submit a chat prompt to a model with a prompt template.
 *
 * @throws {@link ChatPromptValidationError}
 */
export declare function validateMistralPrompt(chatPrompt: ChatPrompt): void;
```
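As the declarations show, a prompt template is a plain object: a `format` function plus stop sequences. That means custom templates can be defined inline. A small sketch, assuming `TextGenerationPromptTemplate` is re-exported from the package root:

```ts
import type { TextGenerationPromptTemplate } from "modelfusion";

// A trivial custom template that passes the text prompt through unchanged.
const passthrough: TextGenerationPromptTemplate<string, string> = {
  stopSequences: [],
  format: (prompt) => prompt,
};
```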
package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.js
ADDED (+143 lines)
```js
import { validateContentIsString } from "./ContentPart.js";
import { InvalidPromptError } from "./InvalidPromptError.js";
const BEGIN_SEGMENT = "<s>";
const END_SEGMENT = "</s>";
const BEGIN_INSTRUCTION = "[INST] ";
const END_INSTRUCTION = " [/INST] ";
/**
 * Formats a text prompt as a Mistral instruct prompt.
 *
 * Mistral prompt template:
 * ```
 * <s>[INST] { instruction } [/INST]
 * ```
 *
 * @see https://docs.mistral.ai/models/#chat-template
 */
export function text() {
    return {
        stopSequences: [END_SEGMENT],
        format(prompt) {
            return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt}${END_INSTRUCTION}`;
        },
    };
}
/**
 * Formats an instruction prompt as a Mistral instruct prompt.
 *
 * Note that Mistral does not support system prompts. We emulate them.
 *
 * Mistral prompt template when system prompt is set:
 * ```
 * <s>[INST] ${ system prompt } [/INST] </s>[INST] ${instruction} [/INST] ${ response prefix }
 * ```
 *
 * Mistral prompt template when there is no system prompt:
 * ```
 * <s>[INST] ${ instruction } [/INST] ${ response prefix }
 * ```
 *
 * @see https://docs.mistral.ai/models/#chat-template
 */
export function instruction() {
    return {
        stopSequences: [END_SEGMENT],
        format(prompt) {
            const instruction = validateContentIsString(prompt.instruction, prompt);
            if (prompt.system != null) {
                return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system}${END_INSTRUCTION}${END_SEGMENT}${BEGIN_INSTRUCTION}${instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
            }
            return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
        },
    };
}
/**
 * Formats a chat prompt as a Mistral instruct prompt.
 *
 * Note that Mistral does not support system prompts. We emulate them.
 *
 * Mistral prompt template when system prompt is set:
 * ```
 * <s>[INST] ${ system prompt } [/INST] </s> [INST] ${ user msg 1 } [/INST] ${ model response 1 } [INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
 * ```
 *
 * Mistral prompt template when there is no system prompt:
 * ```
 * <s>[INST] ${ user msg 1 } [/INST] ${ model response 1 } </s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
 * ```
 *
 * @see https://docs.mistral.ai/models/#chat-template
 */
export function chat() {
    return {
        format(prompt) {
            validateMistralPrompt(prompt);
            let text = "";
            let i = 0;
            // handle the special first segment
            if (prompt.system != null) {
                text += `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system}${END_INSTRUCTION}${END_SEGMENT}`;
            }
            else {
                // get content of the first message (validated to be a user message)
                text = `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.messages[0].content}${END_INSTRUCTION}`;
                // process 2nd message (validated to be an assistant message)
                if (prompt.messages.length > 1) {
                    text += `${prompt.messages[1].content}${END_SEGMENT}`;
                }
                i = 2;
            }
            // process remaining messages
            for (; i < prompt.messages.length; i++) {
                const { role, content } = prompt.messages[i];
                switch (role) {
                    case "user": {
                        const textContent = validateContentIsString(content, prompt);
                        text += `${BEGIN_INSTRUCTION}${textContent}${END_INSTRUCTION}`;
                        break;
                    }
                    case "assistant": {
                        text += validateContentIsString(content, prompt);
                        break;
                    }
                    case "tool": {
                        throw new InvalidPromptError("Tool messages are not supported.", prompt);
                    }
                    default: {
                        const _exhaustiveCheck = role;
                        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
                    }
                }
            }
            return text;
        },
        stopSequences: [END_SEGMENT],
    };
}
/**
 * Checks if a Mistral chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
 *
 * - The first message of the chat must be a user message.
 * - Then it must be alternating between an assistant message and a user message.
 * - The last message must always be a user message (when submitting to a model).
 *
 * The type checking is done at runtime when you submit a chat prompt to a model with a prompt template.
 *
 * @throws {@link ChatPromptValidationError}
 */
export function validateMistralPrompt(chatPrompt) {
    const messages = chatPrompt.messages;
    if (messages.length < 1) {
        throw new InvalidPromptError("ChatPrompt should have at least one message.", chatPrompt);
    }
    for (let i = 0; i < messages.length; i++) {
        const expectedRole = i % 2 === 0 ? "user" : "assistant";
        const role = messages[i].role;
        if (role !== expectedRole) {
            throw new InvalidPromptError(`Message at index ${i} should have role '${expectedRole}', but has role '${role}'.`, chatPrompt);
        }
    }
    if (messages.length % 2 === 0) {
        throw new InvalidPromptError("The last message must be a user message.", chatPrompt);
    }
}
```
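`validateMistralPrompt` enforces the strict user/assistant alternation that the template relies on; an even message count means the chat would end with an assistant message and is rejected. A sketch, assuming the validator is re-exported on the `MistralInstructPrompt` namespace like the template factories:

```ts
import { MistralInstructPrompt } from "modelfusion";

// Passes: roles alternate and the last message is a user message.
MistralInstructPrompt.validateMistralPrompt({
  messages: [
    { role: "user", content: "Hi" },
    { role: "assistant", content: "Hello!" },
    { role: "user", content: "Bye?" },
  ],
});

// Throws InvalidPromptError: even message count, so the chat
// ends with an assistant message.
MistralInstructPrompt.validateMistralPrompt({
  messages: [
    { role: "user", content: "Hi" },
    { role: "assistant", content: "Hello!" },
  ],
});
```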