modelfusion 0.69.0 → 0.71.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +26 -13
- package/model-function/ModelCallEvent.d.ts +3 -2
- package/model-function/generate-text/index.cjs +1 -8
- package/model-function/generate-text/index.d.ts +1 -8
- package/model-function/generate-text/index.js +1 -8
- package/model-function/generate-text/prompt-format/AlpacaPromptFormat.cjs +31 -3
- package/model-function/generate-text/prompt-format/AlpacaPromptFormat.d.ts +29 -1
- package/model-function/generate-text/prompt-format/AlpacaPromptFormat.js +29 -1
- package/model-function/generate-text/prompt-format/ChatMLPromptFormat.cjs +79 -0
- package/model-function/generate-text/prompt-format/ChatMLPromptFormat.d.ts +31 -0
- package/model-function/generate-text/prompt-format/ChatMLPromptFormat.js +74 -0
- package/model-function/generate-text/prompt-format/ChatPrompt.d.ts +28 -23
- package/model-function/generate-text/prompt-format/ChatPromptValidationError.cjs +17 -0
- package/model-function/generate-text/prompt-format/ChatPromptValidationError.d.ts +8 -0
- package/model-function/generate-text/prompt-format/ChatPromptValidationError.js +13 -0
- package/model-function/generate-text/prompt-format/Llama2PromptFormat.cjs +41 -27
- package/model-function/generate-text/prompt-format/Llama2PromptFormat.d.ts +20 -2
- package/model-function/generate-text/prompt-format/Llama2PromptFormat.js +38 -24
- package/model-function/generate-text/prompt-format/TextPromptFormat.cjs +27 -30
- package/model-function/generate-text/prompt-format/TextPromptFormat.d.ts +7 -5
- package/model-function/generate-text/prompt-format/TextPromptFormat.js +24 -27
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.cjs +21 -29
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.d.ts +2 -2
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.js +19 -27
- package/model-function/generate-text/prompt-format/index.cjs +39 -0
- package/model-function/generate-text/prompt-format/index.d.ts +10 -0
- package/model-function/generate-text/prompt-format/index.js +10 -0
- package/model-function/generate-text/prompt-format/trimChatPrompt.cjs +17 -22
- package/model-function/generate-text/prompt-format/trimChatPrompt.js +17 -22
- package/model-function/generate-text/prompt-format/validateChatPrompt.cjs +12 -24
- package/model-function/generate-text/prompt-format/validateChatPrompt.d.ts +0 -3
- package/model-function/generate-text/prompt-format/validateChatPrompt.js +10 -21
- package/model-function/generate-tool-call/NoSuchToolDefinitionError.cjs +41 -0
- package/model-function/generate-tool-call/NoSuchToolDefinitionError.d.ts +17 -0
- package/model-function/generate-tool-call/NoSuchToolDefinitionError.js +37 -0
- package/model-function/generate-tool-call/ToolCall.d.ts +5 -0
- package/model-function/generate-tool-call/ToolCallGenerationModel.d.ts +3 -3
- package/model-function/generate-tool-call/ToolCallParametersValidationError.cjs +1 -1
- package/model-function/generate-tool-call/ToolCallParametersValidationError.js +1 -1
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.cjs +2 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.d.ts +23 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.js +1 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.cjs +2 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.d.ts +21 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.js +1 -0
- package/model-function/generate-tool-call/ToolDefinition.cjs +2 -0
- package/model-function/generate-tool-call/{ToolCallDefinition.d.ts → ToolDefinition.d.ts} +1 -1
- package/model-function/generate-tool-call/ToolDefinition.js +1 -0
- package/model-function/generate-tool-call/generateToolCall.cjs +2 -1
- package/model-function/generate-tool-call/generateToolCall.d.ts +6 -11
- package/model-function/generate-tool-call/generateToolCall.js +2 -1
- package/model-function/generate-tool-call/generateToolCallsOrText.cjs +63 -0
- package/model-function/generate-tool-call/generateToolCallsOrText.d.ts +33 -0
- package/model-function/generate-tool-call/generateToolCallsOrText.js +59 -0
- package/model-function/generate-tool-call/index.cjs +7 -2
- package/model-function/generate-tool-call/index.d.ts +7 -2
- package/model-function/generate-tool-call/index.js +7 -2
- package/model-provider/anthropic/AnthropicPromptFormat.cjs +22 -26
- package/model-provider/anthropic/AnthropicPromptFormat.d.ts +4 -2
- package/model-provider/anthropic/AnthropicPromptFormat.js +19 -23
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +2 -2
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +3 -3
- package/model-provider/anthropic/index.cjs +14 -2
- package/model-provider/anthropic/index.d.ts +1 -1
- package/model-provider/anthropic/index.js +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.js +4 -4
- package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs → LlamaCppBakLLaVA1Format.cjs} +4 -4
- package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.d.ts → LlamaCppBakLLaVA1Format.d.ts} +2 -2
- package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js → LlamaCppBakLLaVA1Format.js} +2 -2
- package/model-provider/llamacpp/index.cjs +14 -2
- package/model-provider/llamacpp/index.d.ts +1 -1
- package/model-provider/llamacpp/index.js +1 -1
- package/model-provider/openai/OpenAICompletionModel.cjs +4 -4
- package/model-provider/openai/OpenAICompletionModel.d.ts +1 -1
- package/model-provider/openai/OpenAICompletionModel.js +5 -5
- package/model-provider/openai/chat/OpenAIChatMessage.d.ts +4 -1
- package/model-provider/openai/chat/OpenAIChatModel.cjs +29 -3
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +63 -16
- package/model-provider/openai/chat/OpenAIChatModel.js +30 -4
- package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs +22 -34
- package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +2 -2
- package/model-provider/openai/chat/OpenAIChatPromptFormat.js +19 -31
- package/model-provider/openai/index.cjs +14 -2
- package/model-provider/openai/index.d.ts +1 -1
- package/model-provider/openai/index.js +1 -1
- package/package.json +2 -2
- package/tool/Tool.cjs +1 -1
- package/tool/Tool.d.ts +1 -1
- package/tool/Tool.js +1 -1
- /package/model-function/generate-tool-call/{ToolCallDefinition.cjs → ToolCall.cjs} +0 -0
- /package/model-function/generate-tool-call/{ToolCallDefinition.js → ToolCall.js} +0 -0
package/README.md
CHANGED
@@ -26,7 +26,7 @@
 ## Quick Install

 > [!NOTE]
-> ModelFusion is in its initial development phase. The main API is now mostly stable, but until version 1.0 there may be
+> ModelFusion is in its initial development phase. The main API is now mostly stable, but until version 1.0 there may be breaking changes. Feedback and suggestions are welcome.

 ```sh
 npm install modelfusion
@@ -52,7 +52,7 @@ const text = await generateText(
 );
 ```

-Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)

 #### streamText

@@ -67,7 +67,7 @@ for await (const textPart of textStream) {
 }
 ```

-Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)

 #### streamText with multi-modal prompt

@@ -304,7 +304,7 @@ const embeddings = await embedMany(
 );
 ```

-Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)

 ### [Tokenize Text](https://modelfusion.dev/guide/function/tokenize-text)

@@ -322,7 +322,7 @@ const tokensAndTokenTexts = await tokenizer.tokenizeWithTexts(text);
 const reconstructedText = await tokenizer.detokenize(tokens);
 ```

-Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)

 ### [Guards](https://modelfusion.dev/guide/guard)

@@ -470,13 +470,14 @@ Prompt formats let you use higher level prompt structures (such as instruction o
 #### [Text Generation Prompt Formats](https://modelfusion.dev/guide/function/generate-text#prompt-format)

 ```ts
+// example assumes you are running https://huggingface.co/TheBloke/Llama-2-7B-GGUF with llama.cpp
 const text = await generateText(
   new LlamaCppTextGenerationModel({
     contextWindowSize: 4096, // Llama 2 context window size
     maxCompletionTokens: 1000,
   })
-    .withTextPrompt()
-    .withPromptFormat(
+    .withTextPrompt() // pure text prompt (no images)
+    .withPromptFormat(Llama2PromptFormat.instruction()),
   {
     system: "You are a story writer.",
     instruction: "Write a short story about a robot learning to love.",
@@ -491,12 +492,23 @@ const textStream = await streamText(
   new OpenAIChatModel({
     model: "gpt-3.5-turbo",
   }).withChatPrompt(),
-
-
-
-
-
-
+  {
+    system: "You are a celebrated poet.",
+    messages: [
+      {
+        role: "user",
+        content: "Suggest a name for a robot.",
+      },
+      {
+        role: "assistant",
+        content: "I suggest the name Robbie",
+      },
+      {
+        role: "user",
+        content: "Write a short story about Robbie learning to love",
+      },
+    ],
+  }
 );
 ```

@@ -505,6 +517,7 @@ const textStream = await streamText(
 | OpenAI Chat | ✅ | ✅ |
 | Anthropic | ✅ | ✅ |
 | Llama 2 | ✅ | ✅ |
+| ChatML | ✅ | ✅ |
 | Alpaca | ✅ | ❌ |
 | Vicuna | ❌ | ✅ |
 | Generic Text | ✅ | ✅ |
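
The new ChatML row corresponds to the `ChatMLPromptFormat` module added later in this diff. As a rough sketch (assuming `ChatMLPromptFormat` is exported from the package root like `Llama2PromptFormat`, and that a ChatML-tuned model is being served by llama.cpp), it plugs into the same API as the Llama 2 example above:

```ts
import {
  ChatMLPromptFormat, // assumed root export, by analogy with Llama2PromptFormat
  LlamaCppTextGenerationModel,
  streamText,
} from "modelfusion";

const textStream = await streamText(
  new LlamaCppTextGenerationModel({ maxCompletionTokens: 1000 })
    .withTextPrompt() // pure text prompt (no images)
    .withPromptFormat(ChatMLPromptFormat.chat()),
  {
    system: "You are a helpful assistant.",
    messages: [{ role: "user", content: "Why is the sky blue?" }],
  }
);

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```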
package/model-function/ModelCallEvent.d.ts
CHANGED
@@ -7,6 +7,7 @@ import { StructureGenerationFinishedEvent, StructureGenerationStartedEvent } fro
 import { StructureStreamingFinishedEvent, StructureStreamingStartedEvent } from "./generate-structure/StructureStreamingEvent.js";
 import { TextGenerationFinishedEvent, TextGenerationStartedEvent, TextStreamingFinishedEvent, TextStreamingStartedEvent } from "./generate-text/TextGenerationEvent.js";
 import { ToolCallGenerationFinishedEvent, ToolCallGenerationStartedEvent } from "./generate-tool-call/ToolCallGenerationEvent.js";
+import { ToolCallsOrTextGenerationFinishedEvent, ToolCallsOrTextGenerationStartedEvent } from "./generate-tool-call/ToolCallsOrTextGenerationEvent.js";
 import { TranscriptionFinishedEvent, TranscriptionStartedEvent } from "./generate-transcription/TranscriptionEvent.js";
 export interface BaseModelCallStartedEvent extends BaseFunctionStartedEvent {
     model: ModelInformation;
@@ -48,5 +49,5 @@ export interface BaseModelCallFinishedEvent extends BaseFunctionFinishedEvent {
      */
     result: BaseModelCallFinishedEventResult;
 }
-export type ModelCallStartedEvent = EmbeddingStartedEvent | ImageGenerationStartedEvent | SpeechGenerationStartedEvent | SpeechStreamingStartedEvent | StructureGenerationStartedEvent | StructureStreamingStartedEvent | TextGenerationStartedEvent | TextStreamingStartedEvent | ToolCallGenerationStartedEvent | TranscriptionStartedEvent;
-export type ModelCallFinishedEvent = EmbeddingFinishedEvent | ImageGenerationFinishedEvent | SpeechGenerationFinishedEvent | SpeechStreamingFinishedEvent | StructureGenerationFinishedEvent | StructureStreamingFinishedEvent | TextGenerationFinishedEvent | TextStreamingFinishedEvent | ToolCallGenerationFinishedEvent | TranscriptionFinishedEvent;
+export type ModelCallStartedEvent = EmbeddingStartedEvent | ImageGenerationStartedEvent | SpeechGenerationStartedEvent | SpeechStreamingStartedEvent | StructureGenerationStartedEvent | StructureStreamingStartedEvent | TextGenerationStartedEvent | TextStreamingStartedEvent | ToolCallGenerationStartedEvent | ToolCallsOrTextGenerationStartedEvent | TranscriptionStartedEvent;
+export type ModelCallFinishedEvent = EmbeddingFinishedEvent | ImageGenerationFinishedEvent | SpeechGenerationFinishedEvent | SpeechStreamingFinishedEvent | StructureGenerationFinishedEvent | StructureStreamingFinishedEvent | TextGenerationFinishedEvent | TextStreamingFinishedEvent | ToolCallGenerationFinishedEvent | ToolCallsOrTextGenerationFinishedEvent | TranscriptionFinishedEvent;
package/model-function/generate-text/index.cjs
CHANGED
@@ -20,12 +20,5 @@ __exportStar(require("./TextGenerationEvent.cjs"), exports);
 __exportStar(require("./TextGenerationModel.cjs"), exports);
 __exportStar(require("./TextGenerationPromptFormat.cjs"), exports);
 __exportStar(require("./generateText.cjs"), exports);
-__exportStar(require("./prompt-format/
-__exportStar(require("./prompt-format/ChatPrompt.cjs"), exports);
-__exportStar(require("./prompt-format/InstructionPrompt.cjs"), exports);
-__exportStar(require("./prompt-format/Llama2PromptFormat.cjs"), exports);
-__exportStar(require("./prompt-format/TextPromptFormat.cjs"), exports);
-__exportStar(require("./prompt-format/VicunaPromptFormat.cjs"), exports);
-__exportStar(require("./prompt-format/trimChatPrompt.cjs"), exports);
-__exportStar(require("./prompt-format/validateChatPrompt.cjs"), exports);
+__exportStar(require("./prompt-format/index.cjs"), exports);
 __exportStar(require("./streamText.cjs"), exports);
package/model-function/generate-text/index.d.ts
CHANGED
@@ -4,12 +4,5 @@ export * from "./TextGenerationEvent.js";
 export * from "./TextGenerationModel.js";
 export * from "./TextGenerationPromptFormat.js";
 export * from "./generateText.js";
-export * from "./prompt-format/
-export * from "./prompt-format/ChatPrompt.js";
-export * from "./prompt-format/InstructionPrompt.js";
-export * from "./prompt-format/Llama2PromptFormat.js";
-export * from "./prompt-format/TextPromptFormat.js";
-export * from "./prompt-format/VicunaPromptFormat.js";
-export * from "./prompt-format/trimChatPrompt.js";
-export * from "./prompt-format/validateChatPrompt.js";
+export * from "./prompt-format/index.js";
 export * from "./streamText.js";
package/model-function/generate-text/index.js
CHANGED
@@ -4,12 +4,5 @@ export * from "./TextGenerationEvent.js";
 export * from "./TextGenerationModel.js";
 export * from "./TextGenerationPromptFormat.js";
 export * from "./generateText.js";
-export * from "./prompt-format/
-export * from "./prompt-format/ChatPrompt.js";
-export * from "./prompt-format/InstructionPrompt.js";
-export * from "./prompt-format/Llama2PromptFormat.js";
-export * from "./prompt-format/TextPromptFormat.js";
-export * from "./prompt-format/VicunaPromptFormat.js";
-export * from "./prompt-format/trimChatPrompt.js";
-export * from "./prompt-format/validateChatPrompt.js";
+export * from "./prompt-format/index.js";
 export * from "./streamText.js";
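
All three build targets (`index.cjs`, `index.d.ts`, `index.js`) replace the per-file `prompt-format` re-exports with a single barrel file, so imports from the package root should be unaffected. A minimal check, assuming the new `prompt-format/index` re-exports the same symbols as before:

```ts
// These imports previously resolved through the individual re-export lines
// and now resolve through prompt-format/index.js instead.
import { trimChatPrompt, validateChatPrompt } from "modelfusion";

console.log(typeof trimChatPrompt, typeof validateChatPrompt); // "function" "function"
```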
package/model-function/generate-text/prompt-format/AlpacaPromptFormat.cjs
CHANGED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.
+exports.instruction = void 0;
 const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
 const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
 /**
@@ -9,9 +9,37 @@ const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a
  * If the instruction has a system prompt, it overrides the default system prompt
  * (which can impact the results, because the model may be trained on the default system prompt).
  *
+ * Prompt format with input:
+ * ```
+ * Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+ *
+ * ### Instruction:
+ *
+ * {instruction}
+ *
+ * ### Input:
+ *
+ * {input}
+ *
+ * ### Response:
+ *
+ * ```
+ *
+ * Prompt format without input:
+ * ```
+ * Below is an instruction that describes a task. Write a response that appropriately completes the request.
+ *
+ * ### Instruction:
+ *
+ * {instruction}
+ *
+ * ### Response:
+ *
+ * ```
+ *
 * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
 */
-function
+function instruction() {
     return {
         stopSequences: [],
         format: (instruction) => {
@@ -32,4 +32,4 @@ function mapInstructionPromptToAlpacaFormat() {
         },
     };
 }
-exports.
+exports.instruction = instruction;
package/model-function/generate-text/prompt-format/AlpacaPromptFormat.d.ts
CHANGED
@@ -6,6 +6,34 @@ import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
 * If the instruction has a system prompt, it overrides the default system prompt
 * (which can impact the results, because the model may be trained on the default system prompt).
 *
+ * Prompt format with input:
+ * ```
+ * Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+ *
+ * ### Instruction:
+ *
+ * {instruction}
+ *
+ * ### Input:
+ *
+ * {input}
+ *
+ * ### Response:
+ *
+ * ```
+ *
+ * Prompt format without input:
+ * ```
+ * Below is an instruction that describes a task. Write a response that appropriately completes the request.
+ *
+ * ### Instruction:
+ *
+ * {instruction}
+ *
+ * ### Response:
+ *
+ * ```
+ *
 * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
 */
-export declare function
+export declare function instruction(): TextGenerationPromptFormat<InstructionPrompt, string>;
package/model-function/generate-text/prompt-format/AlpacaPromptFormat.js
CHANGED
@@ -6,9 +6,37 @@ const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a
 * If the instruction has a system prompt, it overrides the default system prompt
 * (which can impact the results, because the model may be trained on the default system prompt).
 *
+ * Prompt format with input:
+ * ```
+ * Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+ *
+ * ### Instruction:
+ *
+ * {instruction}
+ *
+ * ### Input:
+ *
+ * {input}
+ *
+ * ### Response:
+ *
+ * ```
+ *
+ * Prompt format without input:
+ * ```
+ * Below is an instruction that describes a task. Write a response that appropriately completes the request.
+ *
+ * ### Instruction:
+ *
+ * {instruction}
+ *
+ * ### Response:
+ *
+ * ```
+ *
 * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
 */
-export function
+export function instruction() {
     return {
         stopSequences: [],
         format: (instruction) => {
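
Across all three Alpaca files, `mapInstructionPromptToAlpacaFormat` (the old name is visible in the `@@ -32,4 +32,4 @@` hunk header above) becomes `instruction()`, matching the `Llama2PromptFormat.instruction()` usage in the README hunk. A minimal sketch of rendering an Alpaca prompt string directly, assuming the function is exposed through a namespaced `AlpacaPromptFormat` export like the other prompt formats:

```ts
import { AlpacaPromptFormat } from "modelfusion"; // assumed namespaced root export

const { format, stopSequences } = AlpacaPromptFormat.instruction();

const prompt = format({
  instruction: "Summarize the following text in one sentence.",
  input: "ModelFusion is a TypeScript library for building AI applications.",
});

// prompt begins with the default system prompt for instructions with input:
// "Below is an instruction that describes a task, paired with an input ..."
console.log(prompt, stopSequences);
```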
package/model-function/generate-text/prompt-format/ChatMLPromptFormat.cjs
ADDED
@@ -0,0 +1,79 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.chat = exports.instruction = void 0;
+const validateChatPrompt_js_1 = require("./validateChatPrompt.cjs");
+const START_SEGMENT = "<|im_start|>";
+const END_SEGMENT = "<|im_end|>";
+function chatMLStart(role) {
+    return `${START_SEGMENT}${role}\n`;
+}
+function chatMLSegment(role, text) {
+    return text == null ? "" : `${chatMLStart(role)}${text}${END_SEGMENT}\n`;
+}
+/**
+ * Formats an instruction prompt using the ChatML format.
+ *
+ * ChatML prompt template:
+ * ```
+ * <|im_start|>system
+ * You are a helpful assistant that answers questions about the world.<|im_end|>
+ * <|im_start|>user
+ * What is the capital of France?<|im_end|>
+ * <|im_start|>assistant
+ * Paris<|im_end|>
+ * ```
+ */
+function instruction() {
+    return {
+        stopSequences: [END_SEGMENT],
+        format: (instruction) => chatMLSegment("system", instruction.system) +
+            chatMLSegment("user", instruction.instruction + instruction.input != null
+                ? `\n\n${instruction.input}`
+                : ""),
+    };
+}
+exports.instruction = instruction;
+/**
+ * Formats a chat prompt using the ChatML format.
+ *
+ * ChatML prompt template:
+ * ```
+ * <|im_start|>system
+ * You are a helpful assistant that answers questions about the world.<|im_end|>
+ * <|im_start|>user
+ * What is the capital of France?<|im_end|>
+ * <|im_start|>assistant
+ * Paris<|im_end|>
+ * ```
+ */
+function chat() {
+    return {
+        format: (chatPrompt) => {
+            (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
+            let text = chatPrompt.system != null
+                ? chatMLSegment("system", chatPrompt.system)
+                : "";
+            for (const { role, content } of chatPrompt.messages) {
+                switch (role) {
+                    case "user": {
+                        text += chatMLSegment("user", content);
+                        break;
+                    }
+                    case "assistant": {
+                        text += chatMLSegment("assistant", content);
+                        break;
+                    }
+                    default: {
+                        const _exhaustiveCheck = role;
+                        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                    }
+                }
+            }
+            // prefix start of assistant response:
+            text += chatMLStart("assistant");
+            return text;
+        },
+        stopSequences: [END_SEGMENT],
+    };
+}
+exports.chat = chat;
package/model-function/generate-text/prompt-format/ChatMLPromptFormat.d.ts
ADDED
@@ -0,0 +1,31 @@
+import { ChatPrompt } from "./ChatPrompt.js";
+import { InstructionPrompt } from "./InstructionPrompt.js";
+import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
+/**
+ * Formats an instruction prompt using the ChatML format.
+ *
+ * ChatML prompt template:
+ * ```
+ * <|im_start|>system
+ * You are a helpful assistant that answers questions about the world.<|im_end|>
+ * <|im_start|>user
+ * What is the capital of France?<|im_end|>
+ * <|im_start|>assistant
+ * Paris<|im_end|>
+ * ```
+ */
+export declare function instruction(): TextGenerationPromptFormat<InstructionPrompt, string>;
+/**
+ * Formats a chat prompt using the ChatML format.
+ *
+ * ChatML prompt template:
+ * ```
+ * <|im_start|>system
+ * You are a helpful assistant that answers questions about the world.<|im_end|>
+ * <|im_start|>user
+ * What is the capital of France?<|im_end|>
+ * <|im_start|>assistant
+ * Paris<|im_end|>
+ * ```
+ */
+export declare function chat(): TextGenerationPromptFormat<ChatPrompt, string>;
package/model-function/generate-text/prompt-format/ChatMLPromptFormat.js
ADDED
@@ -0,0 +1,74 @@
+import { validateChatPrompt } from "./validateChatPrompt.js";
+const START_SEGMENT = "<|im_start|>";
+const END_SEGMENT = "<|im_end|>";
+function chatMLStart(role) {
+    return `${START_SEGMENT}${role}\n`;
+}
+function chatMLSegment(role, text) {
+    return text == null ? "" : `${chatMLStart(role)}${text}${END_SEGMENT}\n`;
+}
+/**
+ * Formats an instruction prompt using the ChatML format.
+ *
+ * ChatML prompt template:
+ * ```
+ * <|im_start|>system
+ * You are a helpful assistant that answers questions about the world.<|im_end|>
+ * <|im_start|>user
+ * What is the capital of France?<|im_end|>
+ * <|im_start|>assistant
+ * Paris<|im_end|>
+ * ```
+ */
+export function instruction() {
+    return {
+        stopSequences: [END_SEGMENT],
+        format: (instruction) => chatMLSegment("system", instruction.system) +
+            chatMLSegment("user", instruction.instruction + instruction.input != null
+                ? `\n\n${instruction.input}`
+                : ""),
+    };
+}
+/**
+ * Formats a chat prompt using the ChatML format.
+ *
+ * ChatML prompt template:
+ * ```
+ * <|im_start|>system
+ * You are a helpful assistant that answers questions about the world.<|im_end|>
+ * <|im_start|>user
+ * What is the capital of France?<|im_end|>
+ * <|im_start|>assistant
+ * Paris<|im_end|>
+ * ```
+ */
+export function chat() {
+    return {
+        format: (chatPrompt) => {
+            validateChatPrompt(chatPrompt);
+            let text = chatPrompt.system != null
+                ? chatMLSegment("system", chatPrompt.system)
+                : "";
+            for (const { role, content } of chatPrompt.messages) {
+                switch (role) {
+                    case "user": {
+                        text += chatMLSegment("user", content);
+                        break;
+                    }
+                    case "assistant": {
+                        text += chatMLSegment("assistant", content);
+                        break;
+                    }
+                    default: {
+                        const _exhaustiveCheck = role;
+                        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                    }
+                }
+            }
+            // prefix start of assistant response:
+            text += chatMLStart("assistant");
+            return text;
+        },
+        stopSequences: [END_SEGMENT],
+    };
+}
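
Together, the three ChatML files add `instruction()` and `chat()` formats that render to the `<|im_start|>`/`<|im_end|>` template shown in the JSDoc, with `<|im_end|>` registered as a stop sequence. A sketch of what `chat()` produces, assuming a root-level `ChatMLPromptFormat` namespace export:

```ts
import { ChatMLPromptFormat } from "modelfusion"; // assumed namespaced root export

const { format, stopSequences } = ChatMLPromptFormat.chat();

const text = format({
  system: "You are a helpful assistant that answers questions about the world.",
  messages: [{ role: "user", content: "What is the capital of France?" }],
});

// text is:
// <|im_start|>system
// You are a helpful assistant that answers questions about the world.<|im_end|>
// <|im_start|>user
// What is the capital of France?<|im_end|>
// <|im_start|>assistant
//
// stopSequences is ["<|im_end|>"], so generation stops at the end of the reply.
console.log(text, stopSequences);
```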
package/model-function/generate-text/prompt-format/ChatPrompt.d.ts
CHANGED
@@ -1,33 +1,38 @@
 /**
- * A chat prompt is a
+ * A chat prompt is a combination of a system message and a list of messages with the following constraints:
 *
- * - A chat prompt can optionally
- * -
- * - Then it must be alternating between an
- * - The last message must always be a user message.
+ * - A chat prompt can optionally have a system message.
+ * - The first message of the chat must be a user message.
+ * - Then it must be alternating between an assistant message and a user message.
+ * - The last message must always be a user message (when submitting to a model).
 *
- *
+ * You can use a ChatPrompt without an final user message when you e.g. want to display the current state of a conversation.
+ *
+ * The type checking is done at runtime when you submit a chat prompt to a model with a prompt format.
 *
 * @example
 * ```ts
- *
- *
- *
- *
- *
- *
+ * const chatPrompt: ChatPrompt = {
+ *   system: "You are a celebrated poet.",
+ *   messages: [
+ *     { role: "user", content: "Write a short story about a robot learning to love." },
+ *     { role: "assistant", content: "Once upon a time, there was a robot who learned to love." },
+ *     { role: "user", content: "That's a great start!" },
+ *   ],
+ * };
 * ```
 *
 * @see validateChatPrompt
 */
-export type ChatPrompt =
-
-
-
-
-
-
-
-
-
-
+export type ChatPrompt = {
+    system?: string;
+    messages: Array<ChatMessage>;
+};
+/**
+ * A message in a chat prompt.
+ * @see ChatPrompt
+ */
+export type ChatMessage = {
+    role: "user" | "assistant";
+    content: string;
+};
package/model-function/generate-text/prompt-format/ChatPromptValidationError.cjs
ADDED
@@ -0,0 +1,17 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ChatPromptValidationError = void 0;
+class ChatPromptValidationError extends Error {
+    constructor(message) {
+        super(message);
+        this.name = "ChatPromptValidationError";
+    }
+    toJSON() {
+        return {
+            name: this.name,
+            message: this.message,
+            stack: this.stack,
+        };
+    }
+}
+exports.ChatPromptValidationError = ChatPromptValidationError;
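
The new `ChatPromptValidationError` presumably backs the rewritten `validateChatPrompt` (changed in this release, but its body is not shown in detail here). A sketch of catching it when a prompt violates the `ChatPrompt` constraints documented above:

```ts
import { validateChatPrompt, ChatPromptValidationError } from "modelfusion"; // assumed root exports

try {
  validateChatPrompt({
    messages: [
      // invalid per the ChatPrompt docs: the first message must be a user message
      { role: "assistant", content: "I spoke first." },
    ],
  });
} catch (error) {
  if (error instanceof ChatPromptValidationError) {
    console.error(`${error.name}: ${error.message}`);
  }
}
```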