modelfusion 0.69.0 → 0.70.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +22 -9
- package/model-function/generate-text/index.cjs +1 -8
- package/model-function/generate-text/index.d.ts +1 -8
- package/model-function/generate-text/index.js +1 -8
- package/model-function/generate-text/prompt-format/AlpacaPromptFormat.cjs +31 -3
- package/model-function/generate-text/prompt-format/AlpacaPromptFormat.d.ts +29 -1
- package/model-function/generate-text/prompt-format/AlpacaPromptFormat.js +29 -1
- package/model-function/generate-text/prompt-format/ChatMLPromptFormat.cjs +79 -0
- package/model-function/generate-text/prompt-format/ChatMLPromptFormat.d.ts +31 -0
- package/model-function/generate-text/prompt-format/ChatMLPromptFormat.js +74 -0
- package/model-function/generate-text/prompt-format/ChatPrompt.d.ts +28 -23
- package/model-function/generate-text/prompt-format/ChatPromptValidationError.cjs +17 -0
- package/model-function/generate-text/prompt-format/ChatPromptValidationError.d.ts +8 -0
- package/model-function/generate-text/prompt-format/ChatPromptValidationError.js +13 -0
- package/model-function/generate-text/prompt-format/Llama2PromptFormat.cjs +41 -27
- package/model-function/generate-text/prompt-format/Llama2PromptFormat.d.ts +20 -2
- package/model-function/generate-text/prompt-format/Llama2PromptFormat.js +38 -24
- package/model-function/generate-text/prompt-format/TextPromptFormat.cjs +27 -30
- package/model-function/generate-text/prompt-format/TextPromptFormat.d.ts +7 -5
- package/model-function/generate-text/prompt-format/TextPromptFormat.js +24 -27
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.cjs +21 -29
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.d.ts +2 -2
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.js +19 -27
- package/model-function/generate-text/prompt-format/index.cjs +39 -0
- package/model-function/generate-text/prompt-format/index.d.ts +10 -0
- package/model-function/generate-text/prompt-format/index.js +10 -0
- package/model-function/generate-text/prompt-format/trimChatPrompt.cjs +17 -22
- package/model-function/generate-text/prompt-format/trimChatPrompt.js +17 -22
- package/model-function/generate-text/prompt-format/validateChatPrompt.cjs +12 -24
- package/model-function/generate-text/prompt-format/validateChatPrompt.d.ts +0 -3
- package/model-function/generate-text/prompt-format/validateChatPrompt.js +10 -21
- package/model-provider/anthropic/AnthropicPromptFormat.cjs +22 -26
- package/model-provider/anthropic/AnthropicPromptFormat.d.ts +4 -2
- package/model-provider/anthropic/AnthropicPromptFormat.js +19 -23
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +2 -2
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +3 -3
- package/model-provider/anthropic/index.cjs +14 -2
- package/model-provider/anthropic/index.d.ts +1 -1
- package/model-provider/anthropic/index.js +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.js +4 -4
- package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs → LlamaCppBakLLaVA1Format.cjs} +4 -4
- package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.d.ts → LlamaCppBakLLaVA1Format.d.ts} +2 -2
- package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js → LlamaCppBakLLaVA1Format.js} +2 -2
- package/model-provider/llamacpp/index.cjs +14 -2
- package/model-provider/llamacpp/index.d.ts +1 -1
- package/model-provider/llamacpp/index.js +1 -1
- package/model-provider/openai/OpenAICompletionModel.cjs +4 -4
- package/model-provider/openai/OpenAICompletionModel.d.ts +1 -1
- package/model-provider/openai/OpenAICompletionModel.js +5 -5
- package/model-provider/openai/chat/OpenAIChatModel.cjs +2 -2
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +12 -12
- package/model-provider/openai/chat/OpenAIChatModel.js +3 -3
- package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs +22 -34
- package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +2 -2
- package/model-provider/openai/chat/OpenAIChatPromptFormat.js +19 -31
- package/model-provider/openai/index.cjs +14 -2
- package/model-provider/openai/index.d.ts +1 -1
- package/model-provider/openai/index.js +1 -1
- package/package.json +1 -1
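
The core change in this release shows up across the hunks below: the `mapInstructionPromptTo...Format()` / `mapChatPromptTo...Format()` functions are renamed to `instruction()` / `chat()` and grouped into per-format namespaces (`AlpacaPromptFormat`, `ChatMLPromptFormat`, `Llama2PromptFormat`, `TextPromptFormat`, `VicunaPromptFormat`), and a `ChatPrompt` changes from an array of `{ system }` / `{ user }` / `{ ai }` messages to an object with an optional `system` string and a `messages` array of role/content turns. A minimal sketch of the new usage, assuming the package root re-exports the prompt-format namespaces added in `prompt-format/index.js` below:

```ts
import { Llama2PromptFormat, type ChatPrompt } from "modelfusion";

// 0.70.0 chat prompt structure: optional `system` plus role/content messages.
const chatPrompt: ChatPrompt = {
  system: "You are a helpful assistant.",
  messages: [
    { role: "user", content: "Hello!" },
    { role: "assistant", content: "Hi! How can I help?" },
    { role: "user", content: "Why is the sky blue?" },
  ],
};

// Renamed builder: Llama2PromptFormat.chat() replaces mapChatPromptToLlama2Format().
const { format } = Llama2PromptFormat.chat();
console.log(format(chatPrompt)); // "<s>[INST] <<SYS>>\nYou are a helpful assistant. ..."
```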
package/model-function/generate-text/prompt-format/Llama2PromptFormat.js

```diff
@@ -9,9 +9,18 @@ const END_SYSTEM = "\n<</SYS>>\n\n";
 /**
  * Formats an instruction prompt as a Llama 2 prompt.
  *
+ * Llama 2 prompt template:
+ * ```
+ * <s>[INST] <<SYS>>
+ * ${ system prompt }
+ * <</SYS>>
+ *
+ * { instruction } [/INST]
+ * ```
+ *
  * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
  */
-export function mapInstructionPromptToLlama2Format() {
+export function instruction() {
     return {
         stopSequences: [END_SEGMENT],
         format: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
```
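
A usage sketch for the renamed builder (the top-level `modelfusion` import and the `instruction` field name are assumptions based on the package's `InstructionPrompt` type; the `system` handling follows the format function above):

```ts
import { Llama2PromptFormat } from "modelfusion";

const { format, stopSequences } = Llama2PromptFormat.instruction();

const text = format({
  system: "You are a concise assistant.", // optional, wrapped in <<SYS>> ... <</SYS>>
  instruction: "Summarize the following text.",
});
// text follows the template documented above:
// <s>[INST] <<SYS>>
// You are a concise assistant.
// <</SYS>>
//
// Summarize the following text. [/INST]

console.log(stopSequences); // [END_SEGMENT], presumably ["</s>"] given the template
```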
package/model-function/generate-text/prompt-format/Llama2PromptFormat.js

```diff
@@ -21,35 +30,40 @@ export function mapInstructionPromptToLlama2Format() {
 }
 /**
  * Formats a chat prompt as a Llama 2 prompt.
+ *
+ * Llama 2 prompt template:
+ * ```
+ * <s>[INST] <<SYS>>
+ * ${ system prompt }
+ * <</SYS>>
+ *
+ * ${ user msg 1 } [/INST] ${ model response 1 } </s><s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } </s><s>[INST] ${ user msg 3 } [/INST]
+ * ```
  */
-export function mapChatPromptToLlama2Format() {
+export function chat() {
     return {
         format: (chatPrompt) => {
             validateChatPrompt(chatPrompt);
-            let text = "";
-            for (let i = 0; i < chatPrompt.length; i++) {
-                const message = chatPrompt[i];
-                // system message:
-                if (i === 0 &&
-                    "system" in message &&
-                    typeof message.system === "string") {
-                    // Separate section for system message to simplify implementation
+            let text = chatPrompt.system != null
+                ? // Separate section for system message to simplify implementation
                   // (this is slightly different from the original instructions):
-                    text += `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${BEGIN_SYSTEM}${message.system}${END_SYSTEM}${END_INSTRUCTION}${END_SEGMENT}`;
-                    continue;
-                }
-                // user message
-                if ("user" in message) {
-                    text += `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${message.user}${END_INSTRUCTION}`;
-                    continue;
-                }
-                // ai message:
-                if ("ai" in message) {
-                    text += `${message.ai}${END_SEGMENT}`;
-                    continue;
+                  `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${BEGIN_SYSTEM}${chatPrompt.system}${END_SYSTEM}${END_INSTRUCTION}${END_SEGMENT}`
+                : "";
+            for (const { role, content } of chatPrompt.messages) {
+                switch (role) {
+                    case "user": {
+                        text += `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${content}${END_INSTRUCTION}`;
+                        break;
+                    }
+                    case "assistant": {
+                        text += `${content}${END_SEGMENT}`;
+                        break;
+                    }
+                    default: {
+                        const _exhaustiveCheck = role;
+                        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                    }
                 }
-            // unsupported message:
-            throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
             }
             return text;
         },
```
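
The chat formatters call `validateChatPrompt()` before formatting, and the new `ChatPromptValidationError` (added above in `ChatPromptValidationError.cjs/.d.ts/.js`) makes failures catchable as a typed error. A sketch; the specific rule shown (roles must alternate) is an assumption, since this diff lists but does not show the `validateChatPrompt` changes:

```ts
import { validateChatPrompt, ChatPromptValidationError } from "modelfusion";

try {
  // Two consecutive user messages: assumed to violate the alternating-role rule.
  validateChatPrompt({
    messages: [
      { role: "user", content: "First question" },
      { role: "user", content: "Second question" },
    ],
  });
} catch (error) {
  if (error instanceof ChatPromptValidationError) {
    console.error("Invalid chat prompt:", error.message);
  }
}
```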
package/model-function/generate-text/prompt-format/TextPromptFormat.cjs

```diff
@@ -1,11 +1,11 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.mapChatPromptToTextFormat = exports.mapInstructionPromptToTextFormat = void 0;
+exports.chat = exports.instruction = void 0;
 const validateChatPrompt_js_1 = require("./validateChatPrompt.cjs");
 /**
  * Formats an instruction prompt as a basic text prompt.
  */
-const mapInstructionPromptToTextFormat = () => ({
+const instruction = () => ({
     stopSequences: [],
     format: (instruction) => {
         let text = "";
```
```diff
@@ -19,43 +19,40 @@ const mapInstructionPromptToTextFormat = () => ({
         return text;
     },
 });
-exports.mapInstructionPromptToTextFormat = mapInstructionPromptToTextFormat;
+exports.instruction = instruction;
 /**
  * Formats a chat prompt as a basic text prompt.
  *
- * @param user The label of the user in the chat.
- * @param ai The label of the AI in the chat.
+ * @param user The label of the user in the chat. Default to "user".
+ * @param assistant The label of the assistant in the chat. Default to "assistant".
+ * @param system The label of the system in the chat. Optional, defaults to no prefix.
  */
-const mapChatPromptToTextFormat = ({ user = "user", ai = "ai", } = {}) => ({
+const chat = ({ user = "user", assistant = "assistant", system, } = {}) => ({
     format: (chatPrompt) => {
         (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
-        let text = "";
-        for (let i = 0; i < chatPrompt.length; i++) {
-            const message = chatPrompt[i];
-            // system message:
-            if (i === 0 &&
-                "system" in message &&
-                typeof message.system === "string") {
-                text += `${message.system}\n\n`;
-                continue;
-            }
-            // user message
-            if ("user" in message) {
-                text += `${user}:\n${message.user}\n\n`;
-                continue;
-            }
-            // ai message:
-            if ("ai" in message) {
-                text += `${ai}:\n${message.ai}\n\n`;
-                continue;
+        let text = chatPrompt.system != null
+            ? `${system != null ? `${system}:` : ""}${chatPrompt.system}\n\n`
+            : "";
+        for (const { role, content } of chatPrompt.messages) {
+            switch (role) {
+                case "user": {
+                    text += `${user}:\n${content}\n\n`;
+                    break;
+                }
+                case "assistant": {
+                    text += `${assistant}:\n${content}\n\n`;
+                    break;
+                }
+                default: {
+                    const _exhaustiveCheck = role;
+                    throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                }
             }
-        // unsupported message:
-        throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
         }
-        // AI message prefix:
-        text += `${ai}:\n`;
+        // Assistant message prefix:
+        text += `${assistant}:\n`;
         return text;
     },
     stopSequences: [`\n${user}:`],
 });
-exports.mapChatPromptToTextFormat = mapChatPromptToTextFormat;
+exports.chat = chat;
```
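
Per the implementation above, `TextPromptFormat.chat()` now accepts `user`, `assistant`, and `system` labels in place of the old `user`/`ai` pair. A sketch of the resulting text (the top-level `modelfusion` import is an assumption; the formatting itself follows the code above):

```ts
import { TextPromptFormat } from "modelfusion";

const { format, stopSequences } = TextPromptFormat.chat({
  user: "Customer",
  assistant: "Agent",
  system: "Context", // optional; without it the system text gets no prefix
});

const text = format({
  system: "The customer is asking about order #123.",
  messages: [{ role: "user", content: "Where is my package?" }],
});
// "Context:The customer is asking about order #123.\n\n" +
// "Customer:\nWhere is my package?\n\n" +
// "Agent:\n"

console.log(stopSequences); // [ "\nCustomer:" ]
```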
package/model-function/generate-text/prompt-format/TextPromptFormat.d.ts

```diff
@@ -4,14 +4,16 @@ import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
 /**
  * Formats an instruction prompt as a basic text prompt.
  */
-export declare const mapInstructionPromptToTextFormat: () => TextGenerationPromptFormat<InstructionPrompt, string>;
+export declare const instruction: () => TextGenerationPromptFormat<InstructionPrompt, string>;
 /**
  * Formats a chat prompt as a basic text prompt.
  *
- * @param user The label of the user in the chat.
- * @param ai The label of the AI in the chat.
+ * @param user The label of the user in the chat. Default to "user".
+ * @param assistant The label of the assistant in the chat. Default to "assistant".
+ * @param system The label of the system in the chat. Optional, defaults to no prefix.
  */
-export declare const mapChatPromptToTextFormat: (options?: {
+export declare const chat: (options?: {
     user?: string;
-    ai?: string;
+    assistant?: string;
+    system?: string;
 }) => TextGenerationPromptFormat<ChatPrompt, string>;
```
package/model-function/generate-text/prompt-format/TextPromptFormat.js

```diff
@@ -2,7 +2,7 @@ import { validateChatPrompt } from "./validateChatPrompt.js";
 /**
  * Formats an instruction prompt as a basic text prompt.
  */
-export const mapInstructionPromptToTextFormat = () => ({
+export const instruction = () => ({
     stopSequences: [],
     format: (instruction) => {
         let text = "";
```
```diff
@@ -19,37 +19,34 @@ export const mapInstructionPromptToTextFormat = () => ({
 /**
  * Formats a chat prompt as a basic text prompt.
  *
- * @param user The label of the user in the chat.
- * @param ai The label of the AI in the chat.
+ * @param user The label of the user in the chat. Default to "user".
+ * @param assistant The label of the assistant in the chat. Default to "assistant".
+ * @param system The label of the system in the chat. Optional, defaults to no prefix.
  */
-export const mapChatPromptToTextFormat = ({ user = "user", ai = "ai", } = {}) => ({
+export const chat = ({ user = "user", assistant = "assistant", system, } = {}) => ({
     format: (chatPrompt) => {
         validateChatPrompt(chatPrompt);
-        let text = "";
-        for (let i = 0; i < chatPrompt.length; i++) {
-            const message = chatPrompt[i];
-            // system message:
-            if (i === 0 &&
-                "system" in message &&
-                typeof message.system === "string") {
-                text += `${message.system}\n\n`;
-                continue;
-            }
-            // user message
-            if ("user" in message) {
-                text += `${user}:\n${message.user}\n\n`;
-                continue;
-            }
-            // ai message:
-            if ("ai" in message) {
-                text += `${ai}:\n${message.ai}\n\n`;
-                continue;
+        let text = chatPrompt.system != null
+            ? `${system != null ? `${system}:` : ""}${chatPrompt.system}\n\n`
+            : "";
+        for (const { role, content } of chatPrompt.messages) {
+            switch (role) {
+                case "user": {
+                    text += `${user}:\n${content}\n\n`;
+                    break;
+                }
+                case "assistant": {
+                    text += `${assistant}:\n${content}\n\n`;
+                    break;
+                }
+                default: {
+                    const _exhaustiveCheck = role;
+                    throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                }
             }
-        // unsupported message:
-        throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
         }
-        // AI message prefix:
-        text += `${ai}:\n`;
+        // Assistant message prefix:
+        text += `${assistant}:\n`;
         return text;
     },
     stopSequences: [`\n${user}:`],
```
package/model-function/generate-text/prompt-format/VicunaPromptFormat.cjs

```diff
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.mapChatPromptToVicunaFormat = void 0;
+exports.chat = void 0;
 const validateChatPrompt_js_1 = require("./validateChatPrompt.cjs");
 // default Vicuna 1 system message
 const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
```
```diff
@@ -8,7 +8,7 @@ const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial
 /**
  * Formats a chat prompt as a Vicuna prompt.
  *
- *
+ * Overriding the system message in the first chat message can affect model responses.
  *
  * Vicuna prompt template:
  * ```
```
```diff
@@ -18,36 +18,28 @@ const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial
  * ASSISTANT:
  * ```
  */
-function mapChatPromptToVicunaFormat() {
+function chat() {
     return {
         format: (chatPrompt) => {
             (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
-            let text = "";
-            for (let i = 0; i < chatPrompt.length; i++) {
-                const message = chatPrompt[i];
-                // system message:
-                if (i === 0 &&
-                    "system" in message &&
-                    typeof message.system === "string") {
-                    text += `${message.system}\n\n`;
-                    continue;
+            let text = chatPrompt.system != null
+                ? `${chatPrompt.system}\n\n`
+                : `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
+            for (const { role, content } of chatPrompt.messages) {
+                switch (role) {
+                    case "user": {
+                        text += `USER: ${content}\n`;
+                        break;
+                    }
+                    case "assistant": {
+                        text += `ASSISTANT: ${content}\n`;
+                        break;
+                    }
+                    default: {
+                        const _exhaustiveCheck = role;
+                        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                    }
                 }
-            // first message was not a system message:
-            if (i === 0) {
-                text += `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
-            }
-            // user message
-            if ("user" in message) {
-                text += `USER: ${message.user}\n`;
-                continue;
-            }
-            // ai message:
-            if ("ai" in message) {
-                text += `ASSISTANT:\n${message.ai}\n`;
-                continue;
-            }
-            // unsupported message:
-            throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
             }
             // AI message prefix:
             text += `ASSISTANT: `;
```
```diff
@@ -56,4 +48,4 @@ function mapChatPromptToVicunaFormat() {
         stopSequences: [`\nUSER:`],
     };
 }
-exports.mapChatPromptToVicunaFormat = mapChatPromptToVicunaFormat;
+exports.chat = chat;
```
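
With the restructured prompt, the Vicuna formatter falls back to `DEFAULT_SYSTEM_MESSAGE` whenever `chatPrompt.system` is missing, replacing the old `i === 0` check. A short sketch (top-level import again assumed):

```ts
import { VicunaPromptFormat } from "modelfusion";

const { format } = VicunaPromptFormat.chat();

// No `system` field, so DEFAULT_SYSTEM_MESSAGE ("A chat between a curious
// user and an artificial intelligence assistant. ...") is prepended.
const text = format({
  messages: [{ role: "user", content: "What is a qubit?" }],
});
// "...intelligence assistant. ...\n\nUSER: What is a qubit?\nASSISTANT: "
```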
package/model-function/generate-text/prompt-format/VicunaPromptFormat.d.ts

```diff
@@ -3,7 +3,7 @@ import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
 /**
  * Formats a chat prompt as a Vicuna prompt.
  *
- *
+ * Overriding the system message in the first chat message can affect model responses.
  *
  * Vicuna prompt template:
  * ```
```
```diff
@@ -13,4 +13,4 @@ import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
  * ASSISTANT:
  * ```
  */
-export declare function mapChatPromptToVicunaFormat(): TextGenerationPromptFormat<ChatPrompt, string>;
+export declare function chat(): TextGenerationPromptFormat<ChatPrompt, string>;
```
package/model-function/generate-text/prompt-format/VicunaPromptFormat.js

```diff
@@ -5,7 +5,7 @@ const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial
 /**
  * Formats a chat prompt as a Vicuna prompt.
  *
- *
+ * Overriding the system message in the first chat message can affect model responses.
  *
  * Vicuna prompt template:
  * ```
```
```diff
@@ -15,36 +15,28 @@ const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial
  * ASSISTANT:
  * ```
  */
-export function mapChatPromptToVicunaFormat() {
+export function chat() {
     return {
         format: (chatPrompt) => {
             validateChatPrompt(chatPrompt);
-            let text = "";
-            for (let i = 0; i < chatPrompt.length; i++) {
-                const message = chatPrompt[i];
-                // system message:
-                if (i === 0 &&
-                    "system" in message &&
-                    typeof message.system === "string") {
-                    text += `${message.system}\n\n`;
-                    continue;
+            let text = chatPrompt.system != null
+                ? `${chatPrompt.system}\n\n`
+                : `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
+            for (const { role, content } of chatPrompt.messages) {
+                switch (role) {
+                    case "user": {
+                        text += `USER: ${content}\n`;
+                        break;
+                    }
+                    case "assistant": {
+                        text += `ASSISTANT: ${content}\n`;
+                        break;
+                    }
+                    default: {
+                        const _exhaustiveCheck = role;
+                        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                    }
                 }
-            // first message was not a system message:
-            if (i === 0) {
-                text += `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
-            }
-            // user message
-            if ("user" in message) {
-                text += `USER: ${message.user}\n`;
-                continue;
-            }
-            // ai message:
-            if ("ai" in message) {
-                text += `ASSISTANT:\n${message.ai}\n`;
-                continue;
-            }
-            // unsupported message:
-            throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
             }
             // AI message prefix:
             text += `ASSISTANT: `;
```
package/model-function/generate-text/prompt-format/index.cjs

```diff
@@ -0,0 +1,39 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
+var __exportStar = (this && this.__exportStar) || function(m, exports) {
+    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.VicunaPromptFormat = exports.TextPromptFormat = exports.Llama2PromptFormat = exports.ChatMLPromptFormat = exports.AlpacaPromptFormat = void 0;
+exports.AlpacaPromptFormat = __importStar(require("./AlpacaPromptFormat.cjs"));
+exports.ChatMLPromptFormat = __importStar(require("./ChatMLPromptFormat.cjs"));
+__exportStar(require("./ChatPrompt.cjs"), exports);
+__exportStar(require("./ChatPromptValidationError.cjs"), exports);
+__exportStar(require("./InstructionPrompt.cjs"), exports);
+exports.Llama2PromptFormat = __importStar(require("./Llama2PromptFormat.cjs"));
+exports.TextPromptFormat = __importStar(require("./TextPromptFormat.cjs"));
+exports.VicunaPromptFormat = __importStar(require("./VicunaPromptFormat.cjs"));
+__exportStar(require("./trimChatPrompt.cjs"), exports);
+__exportStar(require("./validateChatPrompt.cjs"), exports);
```
package/model-function/generate-text/prompt-format/index.d.ts

```diff
@@ -0,0 +1,10 @@
+export * as AlpacaPromptFormat from "./AlpacaPromptFormat.js";
+export * as ChatMLPromptFormat from "./ChatMLPromptFormat.js";
+export * from "./ChatPrompt.js";
+export * from "./ChatPromptValidationError.js";
+export * from "./InstructionPrompt.js";
+export * as Llama2PromptFormat from "./Llama2PromptFormat.js";
+export * as TextPromptFormat from "./TextPromptFormat.js";
+export * as VicunaPromptFormat from "./VicunaPromptFormat.js";
+export * from "./trimChatPrompt.js";
+export * from "./validateChatPrompt.js";
```
package/model-function/generate-text/prompt-format/index.js

```diff
@@ -0,0 +1,10 @@
+export * as AlpacaPromptFormat from "./AlpacaPromptFormat.js";
+export * as ChatMLPromptFormat from "./ChatMLPromptFormat.js";
+export * from "./ChatPrompt.js";
+export * from "./ChatPromptValidationError.js";
+export * from "./InstructionPrompt.js";
+export * as Llama2PromptFormat from "./Llama2PromptFormat.js";
+export * as TextPromptFormat from "./TextPromptFormat.js";
+export * as VicunaPromptFormat from "./VicunaPromptFormat.js";
+export * from "./trimChatPrompt.js";
+export * from "./validateChatPrompt.js";
```
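
The new barrel files above expose each format as a namespace with the same builder shape, which makes formats interchangeable. A sketch, assuming the main package index re-exports this barrel:

```ts
import {
  Llama2PromptFormat,
  TextPromptFormat,
  VicunaPromptFormat,
} from "modelfusion";

// One builder shape per namespace; pick the format that matches the model.
const chatFormats = {
  llama2: Llama2PromptFormat.chat(),
  text: TextPromptFormat.chat(),
  vicuna: VicunaPromptFormat.chat(),
};

const prompt = {
  messages: [{ role: "user" as const, content: "Hello!" }],
};

for (const [name, { format }] of Object.entries(chatFormats)) {
  console.log(name, JSON.stringify(format(prompt)));
}
```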
package/model-function/generate-text/prompt-format/trimChatPrompt.cjs

```diff
@@ -15,39 +15,34 @@ const validateChatPrompt_js_1 = require("./validateChatPrompt.cjs");
 async function trimChatPrompt({ prompt, model, tokenLimit = model.contextWindowSize -
     (model.settings.maxCompletionTokens ?? model.contextWindowSize / 4), }) {
     (0, validateChatPrompt_js_1.validateChatPrompt)(prompt);
-    const startsWithSystemMessage = "system" in prompt[0];
-    const systemMessage = startsWithSystemMessage ? [prompt[0]] : [];
-    let messages = [];
-    // add the last message (final user message):
-    messages.push(prompt[prompt.length - 1]);
+    let minimalPrompt = {
+        system: prompt.system,
+        messages: [prompt.messages[prompt.messages.length - 1]], // last user message
+    };
     // check if the minimal prompt is already too long
-    const promptTokenCount = await model.countPromptTokens([
-        ...systemMessage,
-        ...messages,
-    ]);
+    const promptTokenCount = await model.countPromptTokens(minimalPrompt);
     // the minimal chat prompt is already over the token limit and cannot be trimmed further:
     if (promptTokenCount > tokenLimit) {
-        return [...systemMessage, ...messages];
+        return minimalPrompt;
     }
     // inner messages
-    const innerMessages = prompt.slice(startsWithSystemMessage ? 1 : 0, -1);
+    const innerMessages = prompt.messages.slice(0, -1);
     // taking always a pair of user-message and ai-message from the end, moving backwards
     for (let i = innerMessages.length - 1; i >= 0; i -= 2) {
-        const aiMessage = innerMessages[i];
+        const assistantMessage = innerMessages[i];
         const userMessage = innerMessages[i - 1];
-        // create a temporary array and check if it fits within the token limit
-        const tokenCount = await model.countPromptTokens([
-            ...systemMessage,
-            userMessage,
-            aiMessage,
-            ...messages,
-        ]);
+        // create a temporary prompt and check if it fits within the token limit
+        const attemptedPrompt = {
+            system: prompt.system,
+            messages: [userMessage, assistantMessage, ...minimalPrompt.messages],
+        };
+        const tokenCount = await model.countPromptTokens(attemptedPrompt);
         if (tokenCount > tokenLimit) {
             break;
         }
-        // if it fits, add the messages:
-        messages = [userMessage, aiMessage, ...messages];
+        // if it fits, its the new minimal prompt
+        minimalPrompt = attemptedPrompt;
     }
-    return [...systemMessage, ...messages];
+    return minimalPrompt;
 }
 exports.trimChatPrompt = trimChatPrompt;
```
package/model-function/generate-text/prompt-format/trimChatPrompt.js

```diff
@@ -12,38 +12,33 @@ import { validateChatPrompt } from "./validateChatPrompt.js";
 export async function trimChatPrompt({ prompt, model, tokenLimit = model.contextWindowSize -
     (model.settings.maxCompletionTokens ?? model.contextWindowSize / 4), }) {
     validateChatPrompt(prompt);
-    const startsWithSystemMessage = "system" in prompt[0];
-    const systemMessage = startsWithSystemMessage ? [prompt[0]] : [];
-    let messages = [];
-    // add the last message (final user message):
-    messages.push(prompt[prompt.length - 1]);
+    let minimalPrompt = {
+        system: prompt.system,
+        messages: [prompt.messages[prompt.messages.length - 1]], // last user message
+    };
     // check if the minimal prompt is already too long
-    const promptTokenCount = await model.countPromptTokens([
-        ...systemMessage,
-        ...messages,
-    ]);
+    const promptTokenCount = await model.countPromptTokens(minimalPrompt);
     // the minimal chat prompt is already over the token limit and cannot be trimmed further:
     if (promptTokenCount > tokenLimit) {
-        return [...systemMessage, ...messages];
+        return minimalPrompt;
     }
     // inner messages
-    const innerMessages = prompt.slice(startsWithSystemMessage ? 1 : 0, -1);
+    const innerMessages = prompt.messages.slice(0, -1);
     // taking always a pair of user-message and ai-message from the end, moving backwards
     for (let i = innerMessages.length - 1; i >= 0; i -= 2) {
-        const aiMessage = innerMessages[i];
+        const assistantMessage = innerMessages[i];
         const userMessage = innerMessages[i - 1];
-        // create a temporary array and check if it fits within the token limit
-        const tokenCount = await model.countPromptTokens([
-            ...systemMessage,
-            userMessage,
-            aiMessage,
-            ...messages,
-        ]);
+        // create a temporary prompt and check if it fits within the token limit
+        const attemptedPrompt = {
+            system: prompt.system,
+            messages: [userMessage, assistantMessage, ...minimalPrompt.messages],
+        };
+        const tokenCount = await model.countPromptTokens(attemptedPrompt);
         if (tokenCount > tokenLimit) {
             break;
         }
-        // if it fits, add the messages:
-        messages = [userMessage, aiMessage, ...messages];
+        // if it fits, its the new minimal prompt
+        minimalPrompt = attemptedPrompt;
     }
-    return [...systemMessage, ...messages];
+    return minimalPrompt;
 }
```
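
`trimChatPrompt` now trims the structured prompt directly: it keeps `system` plus the final user message, then re-adds user/assistant pairs from the end of the history while they still fit the token limit. A runnable sketch with a stub model; the stub and its 4-characters-per-token estimate are hypothetical stand-ins for a real model's `countPromptTokens`:

```ts
import { trimChatPrompt, type ChatPrompt } from "modelfusion";

// Stub model: crude token estimate, for illustration only.
const stubModel = {
  contextWindowSize: 128,
  settings: { maxCompletionTokens: 32 },
  countPromptTokens: async (prompt: ChatPrompt) =>
    Math.ceil(JSON.stringify(prompt).length / 4),
};

const prompt: ChatPrompt = {
  system: "You are a helpful assistant.",
  messages: [
    { role: "user", content: "A long first question ...".repeat(10) },
    { role: "assistant", content: "A long first answer ...".repeat(10) },
    { role: "user", content: "Short follow-up?" },
  ],
};

// Oldest user/assistant pairs are dropped first; `system` and the
// final user message are always kept.
const trimmed = await trimChatPrompt({
  prompt,
  model: stubModel as any, // cast: the stub implements only what trimChatPrompt uses
});
console.log(trimmed.messages.length); // 1: the long first pair no longer fits
```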