modelfusion 0.13.0 → 0.15.0
- package/README.md +16 -10
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs +1 -1
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js +1 -1
- package/model-function/Model.d.ts +2 -2
- package/model-function/generate-text/TextGenerationModel.d.ts +18 -18
- package/model-function/generate-text/generateText.cjs +2 -2
- package/model-function/generate-text/generateText.js +2 -2
- package/model-provider/cohere/CohereTextGenerationModel.cjs +19 -20
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +4 -9
- package/model-provider/cohere/CohereTextGenerationModel.js +19 -20
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +13 -18
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -8
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +13 -18
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +16 -16
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +9 -14
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +16 -16
- package/model-provider/openai/OpenAITextGenerationModel.cjs +20 -18
- package/model-provider/openai/OpenAITextGenerationModel.d.ts +4 -9
- package/model-provider/openai/OpenAITextGenerationModel.js +20 -18
- package/model-provider/openai/chat/OpenAIChatModel.cjs +15 -18
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +5 -8
- package/model-provider/openai/chat/OpenAIChatModel.js +15 -18
- package/package.json +3 -3
- package/prompt/{AlpacaPromptMapping.cjs → AlpacaPromptFormat.cjs} +6 -6
- package/prompt/{AlpacaPromptMapping.d.ts → AlpacaPromptFormat.d.ts} +3 -3
- package/prompt/{AlpacaPromptMapping.js → AlpacaPromptFormat.js} +4 -4
- package/prompt/{Llama2PromptMapping.cjs → Llama2PromptFormat.cjs} +13 -10
- package/prompt/Llama2PromptFormat.d.ts +13 -0
- package/prompt/{Llama2PromptMapping.js → Llama2PromptFormat.js} +10 -7
- package/prompt/{OpenAIChatPromptMapping.cjs → OpenAIChatPromptFormat.cjs} +15 -9
- package/prompt/OpenAIChatPromptFormat.d.ts +12 -0
- package/prompt/{OpenAIChatPromptMapping.js → OpenAIChatPromptFormat.js} +12 -6
- package/prompt/PromptFormat.d.ts +14 -0
- package/prompt/{PromptMappingTextGenerationModel.js → PromptFormatTextGenerationModel.cjs} +19 -28
- package/prompt/{PromptMappingTextGenerationModel.d.ts → PromptFormatTextGenerationModel.d.ts} +6 -9
- package/prompt/{PromptMappingTextGenerationModel.cjs → PromptFormatTextGenerationModel.js} +15 -32
- package/prompt/{TextPromptMapping.cjs → TextPromptFormat.cjs} +13 -10
- package/prompt/TextPromptFormat.d.ts +17 -0
- package/prompt/{TextPromptMapping.js → TextPromptFormat.js} +10 -7
- package/prompt/{VicunaPromptMapping.cjs → VicunaPromptFormat.cjs} +6 -6
- package/prompt/{VicunaPromptMapping.d.ts → VicunaPromptFormat.d.ts} +3 -3
- package/prompt/{VicunaPromptMapping.js → VicunaPromptFormat.js} +4 -4
- package/prompt/chat/trimChatPrompt.cjs +2 -2
- package/prompt/chat/trimChatPrompt.d.ts +1 -1
- package/prompt/chat/trimChatPrompt.js +2 -2
- package/prompt/index.cjs +7 -7
- package/prompt/index.d.ts +7 -7
- package/prompt/index.js +7 -7
- package/tool/WebSearchTool.cjs +7 -28
- package/tool/WebSearchTool.d.ts +6 -67
- package/tool/WebSearchTool.js +7 -28
- package/tool/executeTool.cjs +1 -0
- package/tool/executeTool.d.ts +5 -4
- package/tool/executeTool.js +1 -0
- package/prompt/Llama2PromptMapping.d.ts +0 -10
- package/prompt/OpenAIChatPromptMapping.d.ts +0 -6
- package/prompt/PromptMapping.d.ts +0 -7
- package/prompt/TextPromptMapping.d.ts +0 -14
- /package/prompt/{PromptMapping.cjs → PromptFormat.cjs} +0 -0
- /package/prompt/{PromptMapping.js → PromptFormat.js} +0 -0
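Taken together, the file list shows one systematic API rename rather than isolated edits: `PromptMapping` becomes `PromptFormat`, its `map()` method becomes `format()`, `stopTokens` becomes `stopSequences`, and the factories move from `InstructionTo<X>PromptMapping`/`ChatTo<X>PromptMapping` to `<X>InstructionPromptFormat`/`<X>ChatPromptFormat`. A minimal sketch of the new naming, assuming a text generation model that exposes `withPromptFormat` and that the format is re-exported from the package root (the model variable and import path are illustrative, not taken from this diff):

import { Llama2InstructionPromptFormat } from "modelfusion";

// Hypothetical: `llamaModel` is any modelfusion text generation model.
const instructionModel = llamaModel.withPromptFormat(Llama2InstructionPromptFormat());
// The wrapped model now accepts { system?, instruction, input? } objects and
// inherits the format's stopSequences (see PromptFormatTextGenerationModel below).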
package/prompt/{Llama2PromptMapping.cjs → Llama2PromptFormat.cjs}
RENAMED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.ChatToLlama2PromptMapping = exports.InstructionToLlama2PromptMapping = void 0;
+exports.Llama2ChatPromptFormat = exports.Llama2InstructionPromptFormat = void 0;
 const validateChatPrompt_js_1 = require("./chat/validateChatPrompt.cjs");
 // see https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/generation.py#L44
 const BEGIN_SEGMENT = "<s>";
@@ -10,19 +10,22 @@ const END_INSTRUCTION = "[/INST]\n";
 const BEGIN_SYSTEM = "<<SYS>>\n";
 const END_SYSTEM = "\n<</SYS>>\n\n";
 /**
- * Maps an instruction prompt to the Llama 2 prompt format.
+ * Formats an instruction prompt as a Llama 2 prompt.
  *
  * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
  */
-const InstructionToLlama2PromptMapping = () => ({
-    stopTokens: [END_SEGMENT],
-    map: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
+const Llama2InstructionPromptFormat = () => ({
+    stopSequences: [END_SEGMENT],
+    format: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
         ? ` ${BEGIN_SYSTEM}${instruction.system}${END_SYSTEM}`
         : ""} ${instruction.instruction}${instruction.input != null ? `\n\n${instruction.input}` : ""} ${END_INSTRUCTION}\n`,
 });
-exports.InstructionToLlama2PromptMapping = InstructionToLlama2PromptMapping;
-const ChatToLlama2PromptMapping = () => ({
-    map: (chatPrompt) => {
+exports.Llama2InstructionPromptFormat = Llama2InstructionPromptFormat;
+/**
+ * Formats a chat prompt as a Llama 2 prompt.
+ */
+const Llama2ChatPromptFormat = () => ({
+    format: (chatPrompt) => {
         (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
         let text = "";
         for (let i = 0; i < chatPrompt.length; i++) {
@@ -51,6 +54,6 @@ const ChatToLlama2PromptMapping = () => ({
         }
         return text;
     },
-    stopTokens: [END_SEGMENT],
+    stopSequences: [END_SEGMENT],
 });
-exports.ChatToLlama2PromptMapping = ChatToLlama2PromptMapping;
+exports.Llama2ChatPromptFormat = Llama2ChatPromptFormat;
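For orientation, the template above yields strings like the following worked example. This assumes `BEGIN_INSTRUCTION` is `"[INST]"`, which sits between the two hunks and is not shown in this diff:

Llama2InstructionPromptFormat().format({
  system: "You are a concise assistant.",
  instruction: "Summarize the input.",
  input: "Lorem ipsum.",
});
// => "<s>[INST] <<SYS>>\nYou are a concise assistant.\n<</SYS>>\n\n Summarize the input.\n\nLorem ipsum. [/INST]\n\n"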
package/prompt/Llama2PromptFormat.d.ts
ADDED
@@ -0,0 +1,13 @@
+import { PromptFormat } from "./PromptFormat.js";
+import { InstructionPrompt } from "./InstructionPrompt.js";
+import { ChatPrompt } from "./chat/ChatPrompt.js";
+/**
+ * Formats an instruction prompt as a Llama 2 prompt.
+ *
+ * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
+ */
+export declare const Llama2InstructionPromptFormat: () => PromptFormat<InstructionPrompt, string>;
+/**
+ * Formats a chat prompt as a Llama 2 prompt.
+ */
+export declare const Llama2ChatPromptFormat: () => PromptFormat<ChatPrompt, string>;
package/prompt/{Llama2PromptMapping.js → Llama2PromptFormat.js}
RENAMED
@@ -7,18 +7,21 @@ const END_INSTRUCTION = "[/INST]\n";
 const BEGIN_SYSTEM = "<<SYS>>\n";
 const END_SYSTEM = "\n<</SYS>>\n\n";
 /**
- * Maps an instruction prompt to the Llama 2 prompt format.
+ * Formats an instruction prompt as a Llama 2 prompt.
  *
  * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
  */
-export const InstructionToLlama2PromptMapping = () => ({
-    stopTokens: [END_SEGMENT],
-    map: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
+export const Llama2InstructionPromptFormat = () => ({
+    stopSequences: [END_SEGMENT],
+    format: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
         ? ` ${BEGIN_SYSTEM}${instruction.system}${END_SYSTEM}`
         : ""} ${instruction.instruction}${instruction.input != null ? `\n\n${instruction.input}` : ""} ${END_INSTRUCTION}\n`,
 });
-export const ChatToLlama2PromptMapping = () => ({
-    map: (chatPrompt) => {
+/**
+ * Formats a chat prompt as a Llama 2 prompt.
+ */
+export const Llama2ChatPromptFormat = () => ({
+    format: (chatPrompt) => {
         validateChatPrompt(chatPrompt);
         let text = "";
         for (let i = 0; i < chatPrompt.length; i++) {
@@ -47,5 +50,5 @@ export const ChatToLlama2PromptMapping = () => ({
         }
         return text;
     },
-    stopTokens: [END_SEGMENT],
+    stopSequences: [END_SEGMENT],
 });
package/prompt/{OpenAIChatPromptMapping.cjs → OpenAIChatPromptFormat.cjs}
RENAMED
@@ -1,9 +1,12 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.ChatToOpenAIChatPromptMapping = exports.InstructionToOpenAIChatPromptMapping = void 0;
+exports.OpenAIChatChatPromptFormat = exports.OpenAIChatInstructionPromptFormat = void 0;
 const validateChatPrompt_js_1 = require("./chat/validateChatPrompt.cjs");
-const InstructionToOpenAIChatPromptMapping = () => ({
-    map: (instruction) => {
+/**
+ * Formats an instruction prompt as an OpenAI chat prompt.
+ */
+const OpenAIChatInstructionPromptFormat = () => ({
+    format: (instruction) => {
         const messages = [];
         if (instruction.system != null) {
             messages.push({
@@ -23,11 +26,14 @@ const InstructionToOpenAIChatPromptMapping = () => ({
         }
         return messages;
     },
-    stopTokens: [],
+    stopSequences: [],
 });
-exports.InstructionToOpenAIChatPromptMapping = InstructionToOpenAIChatPromptMapping;
-const ChatToOpenAIChatPromptMapping = () => ({
-    map: (chatPrompt) => {
+exports.OpenAIChatInstructionPromptFormat = OpenAIChatInstructionPromptFormat;
+/**
+ * Formats a chat prompt as an OpenAI chat prompt.
+ */
+const OpenAIChatChatPromptFormat = () => ({
+    format: (chatPrompt) => {
         (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
         const messages = [];
         for (let i = 0; i < chatPrompt.length; i++) {
@@ -63,6 +69,6 @@ const ChatToOpenAIChatPromptMapping = () => ({
         }
         return messages;
     },
-    stopTokens: [],
+    stopSequences: [],
 });
-exports.ChatToOpenAIChatPromptMapping = ChatToOpenAIChatPromptMapping;
+exports.OpenAIChatChatPromptFormat = OpenAIChatChatPromptFormat;
package/prompt/OpenAIChatPromptFormat.d.ts
ADDED
@@ -0,0 +1,12 @@
+import { OpenAIChatMessage } from "../model-provider/openai/chat/OpenAIChatMessage.js";
+import { ChatPrompt } from "./chat/ChatPrompt.js";
+import { InstructionPrompt } from "./InstructionPrompt.js";
+import { PromptFormat } from "./PromptFormat.js";
+/**
+ * Formats an instruction prompt as an OpenAI chat prompt.
+ */
+export declare const OpenAIChatInstructionPromptFormat: () => PromptFormat<InstructionPrompt, Array<OpenAIChatMessage>>;
+/**
+ * Formats a chat prompt as an OpenAI chat prompt.
+ */
+export declare const OpenAIChatChatPromptFormat: () => PromptFormat<ChatPrompt, Array<OpenAIChatMessage>>;
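These declarations make the provider-specific target type explicit: the OpenAI formats produce `OpenAIChatMessage` arrays rather than strings. A sketch of the instruction variant, assuming the usual `{ role, content }` message shape (the exact fields are not visible in this diff):

OpenAIChatInstructionPromptFormat().format({
  system: "You are a helpful assistant.",
  instruction: "Write a haiku about autumn.",
});
// => [
//   { role: "system", content: "You are a helpful assistant." },
//   { role: "user", content: "Write a haiku about autumn." },
// ]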
package/prompt/{OpenAIChatPromptMapping.js → OpenAIChatPromptFormat.js}
RENAMED
@@ -1,6 +1,9 @@
 import { validateChatPrompt } from "./chat/validateChatPrompt.js";
-export const InstructionToOpenAIChatPromptMapping = () => ({
-    map: (instruction) => {
+/**
+ * Formats an instruction prompt as an OpenAI chat prompt.
+ */
+export const OpenAIChatInstructionPromptFormat = () => ({
+    format: (instruction) => {
         const messages = [];
         if (instruction.system != null) {
             messages.push({
@@ -20,10 +23,13 @@ export const InstructionToOpenAIChatPromptMapping = () => ({
         }
         return messages;
     },
-    stopTokens: [],
+    stopSequences: [],
 });
-export const ChatToOpenAIChatPromptMapping = () => ({
-    map: (chatPrompt) => {
+/**
+ * Formats a chat prompt as an OpenAI chat prompt.
+ */
+export const OpenAIChatChatPromptFormat = () => ({
+    format: (chatPrompt) => {
         validateChatPrompt(chatPrompt);
         const messages = [];
         for (let i = 0; i < chatPrompt.length; i++) {
@@ -59,5 +65,5 @@ export const ChatToOpenAIChatPromptMapping = () => ({
         }
         return messages;
     },
-    stopTokens: [],
+    stopSequences: [],
 });
package/prompt/PromptFormat.d.ts
ADDED
@@ -0,0 +1,14 @@
+/**
+ * Prompt formats format a source prompt into the structure of a target prompt.
+ */
+export interface PromptFormat<SOURCE_PROMPT, TARGET_PROMPT> {
+    /**
+     * Formats the source prompt into the structure of the target prompt.
+     */
+    format(sourcePrompt: SOURCE_PROMPT): TARGET_PROMPT;
+    /**
+     * The texts that should be used as default stop sequences.
+     * This is e.g. important for chat formats.
+     */
+    stopSequences: string[];
+}
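Since `PromptFormat` is a plain interface, custom formats need no base class: any object with a `format` function and a `stopSequences` array qualifies. A hypothetical example (the format name and prompt convention are invented for illustration):

import { PromptFormat } from "modelfusion";

// Turns a bare question string into a Q&A-style text prompt.
const QuestionAnswerPromptFormat: PromptFormat<string, string> = {
  format: (question) => `Q: ${question}\nA:`,
  stopSequences: ["\nQ:"], // stop before the model invents the next question
};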
package/prompt/{PromptMappingTextGenerationModel.js → PromptFormatTextGenerationModel.cjs}
RENAMED
@@ -1,19 +1,22 @@
-export class PromptMappingTextGenerationModel {
-    constructor({ model, promptMapping, }) {
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.PromptFormatTextGenerationModel = void 0;
+class PromptFormatTextGenerationModel {
+    constructor({ model, promptFormat, }) {
         Object.defineProperty(this, "model", {
             enumerable: true,
             configurable: true,
             writable: true,
             value: void 0
         });
-        Object.defineProperty(this, "promptMapping", {
+        Object.defineProperty(this, "promptFormat", {
             enumerable: true,
             configurable: true,
             writable: true,
             value: void 0
         });
         this.model = model;
-        this.promptMapping = promptMapping;
+        this.promptFormat = promptFormat;
     }
     get modelInformation() {
         return this.model.modelInformation;
@@ -32,10 +35,10 @@ export class PromptMappingTextGenerationModel {
         if (originalCountPromptTokens === undefined) {
             return undefined;
         }
-        return ((prompt) => originalCountPromptTokens(this.promptMapping.map(prompt)));
+        return ((prompt) => originalCountPromptTokens(this.promptFormat.format(prompt)));
     }
     generateTextResponse(prompt, options) {
-        const mappedPrompt = this.promptMapping.map(prompt);
+        const mappedPrompt = this.promptFormat.format(prompt);
         return this.model.generateTextResponse(mappedPrompt, options);
     }
     extractText(response) {
@@ -47,38 +50,26 @@ export class PromptMappingTextGenerationModel {
             return undefined;
         }
         return ((prompt, options) => {
-            const mappedPrompt = this.promptMapping.map(prompt);
+            const mappedPrompt = this.promptFormat.format(prompt);
             return originalGenerateDeltaStreamResponse(mappedPrompt, options);
         });
     }
     get extractTextDelta() {
         return this.model.extractTextDelta;
     }
-    withPromptMapping(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withSettings({
+                stopSequences: promptFormat.stopSequences,
+            }),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
-        return new PromptMappingTextGenerationModel({
+        return new PromptFormatTextGenerationModel({
             model: this.model.withSettings(additionalSettings),
-            promptMapping: this.promptMapping,
-        });
-    }
-    get maxCompletionTokens() {
-        return this.model.maxCompletionTokens;
-    }
-    withMaxCompletionTokens(maxCompletionTokens) {
-        return new PromptMappingTextGenerationModel({
-            model: this.model.withMaxCompletionTokens(maxCompletionTokens),
-            promptMapping: this.promptMapping,
-        });
-    }
-    withStopTokens(stopTokens) {
-        return new PromptMappingTextGenerationModel({
-            model: this.model.withStopTokens(stopTokens),
-            promptMapping: this.promptMapping,
+            promptFormat: this.promptFormat,
         });
     }
 }
+exports.PromptFormatTextGenerationModel = PromptFormatTextGenerationModel;
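Beyond the rename, `withPromptFormat` changes behavior: the dedicated `withStopTokens`/`withMaxCompletionTokens` pass-throughs are removed, and the format's stop sequences now travel through the generic settings mechanism. Expanded, the new wrapping is equivalent to this sketch, where `baseModel` and `format` are placeholders:

const wrapped = new PromptFormatTextGenerationModel({
  model: baseModel.withSettings({ stopSequences: format.stopSequences }),
  promptFormat: format,
});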
package/prompt/{PromptMappingTextGenerationModel.d.ts → PromptFormatTextGenerationModel.d.ts}
RENAMED
@@ -1,13 +1,13 @@
 import { FunctionOptions } from "../model-function/FunctionOptions.js";
 import { DeltaEvent } from "../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../model-function/generate-text/TextGenerationModel.js";
-import { PromptMapping } from "./PromptMapping.js";
-export declare class PromptMappingTextGenerationModel<PROMPT, MODEL_PROMPT, RESPONSE, FULL_DELTA, SETTINGS extends TextGenerationModelSettings, MODEL extends TextGenerationModel<MODEL_PROMPT, RESPONSE, FULL_DELTA, SETTINGS>> implements TextGenerationModel<PROMPT, RESPONSE, FULL_DELTA, SETTINGS> {
+import { PromptFormat } from "./PromptFormat.js";
+export declare class PromptFormatTextGenerationModel<PROMPT, MODEL_PROMPT, RESPONSE, FULL_DELTA, SETTINGS extends TextGenerationModelSettings, MODEL extends TextGenerationModel<MODEL_PROMPT, RESPONSE, FULL_DELTA, SETTINGS>> implements TextGenerationModel<PROMPT, RESPONSE, FULL_DELTA, SETTINGS> {
     private readonly model;
-    private readonly promptMapping;
-    constructor({ model, promptMapping, }: {
+    private readonly promptFormat;
+    constructor({ model, promptFormat, }: {
         model: MODEL;
-        promptMapping: PromptMapping<PROMPT, MODEL_PROMPT>;
+        promptFormat: PromptFormat<PROMPT, MODEL_PROMPT>;
     });
     get modelInformation(): import("../index.js").ModelInformation;
     get settings(): SETTINGS;
@@ -18,9 +18,6 @@ export declare class PromptMappingTextGenerationModel<PROMPT, MODEL_PROMPT, RESP
     extractText(response: RESPONSE): string;
     get generateDeltaStreamResponse(): MODEL["generateDeltaStreamResponse"] extends undefined ? undefined : (prompt: PROMPT, options: FunctionOptions<SETTINGS>) => PromiseLike<AsyncIterable<DeltaEvent<FULL_DELTA>>>;
     get extractTextDelta(): MODEL["extractTextDelta"];
-    withPromptMapping<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, PROMPT>): PromptMappingTextGenerationModel<INPUT_PROMPT, PROMPT, RESPONSE, FULL_DELTA, SETTINGS, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, PROMPT>): PromptFormatTextGenerationModel<INPUT_PROMPT, PROMPT, RESPONSE, FULL_DELTA, SETTINGS, this>;
     withSettings(additionalSettings: Partial<SETTINGS>): this;
-    get maxCompletionTokens(): MODEL["maxCompletionTokens"];
-    withMaxCompletionTokens(maxCompletionTokens: number): this;
-    withStopTokens(stopTokens: string[]): this;
 }
package/prompt/{PromptMappingTextGenerationModel.cjs → PromptFormatTextGenerationModel.js}
RENAMED
@@ -1,22 +1,19 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.PromptMappingTextGenerationModel = void 0;
-class PromptMappingTextGenerationModel {
-    constructor({ model, promptMapping, }) {
+export class PromptFormatTextGenerationModel {
+    constructor({ model, promptFormat, }) {
         Object.defineProperty(this, "model", {
             enumerable: true,
             configurable: true,
             writable: true,
             value: void 0
         });
-        Object.defineProperty(this, "promptMapping", {
+        Object.defineProperty(this, "promptFormat", {
             enumerable: true,
             configurable: true,
             writable: true,
             value: void 0
         });
         this.model = model;
-        this.promptMapping = promptMapping;
+        this.promptFormat = promptFormat;
     }
     get modelInformation() {
         return this.model.modelInformation;
@@ -35,10 +32,10 @@ class PromptMappingTextGenerationModel {
         if (originalCountPromptTokens === undefined) {
             return undefined;
         }
-        return ((prompt) => originalCountPromptTokens(this.promptMapping.map(prompt)));
+        return ((prompt) => originalCountPromptTokens(this.promptFormat.format(prompt)));
     }
     generateTextResponse(prompt, options) {
-        const mappedPrompt = this.promptMapping.map(prompt);
+        const mappedPrompt = this.promptFormat.format(prompt);
         return this.model.generateTextResponse(mappedPrompt, options);
     }
     extractText(response) {
@@ -50,39 +47,25 @@ class PromptMappingTextGenerationModel {
             return undefined;
         }
         return ((prompt, options) => {
-            const mappedPrompt = this.promptMapping.map(prompt);
+            const mappedPrompt = this.promptFormat.format(prompt);
             return originalGenerateDeltaStreamResponse(mappedPrompt, options);
         });
     }
     get extractTextDelta() {
         return this.model.extractTextDelta;
     }
-    withPromptMapping(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withSettings({
+                stopSequences: promptFormat.stopSequences,
+            }),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
-        return new PromptMappingTextGenerationModel({
+        return new PromptFormatTextGenerationModel({
             model: this.model.withSettings(additionalSettings),
-            promptMapping: this.promptMapping,
-        });
-    }
-    get maxCompletionTokens() {
-        return this.model.maxCompletionTokens;
-    }
-    withMaxCompletionTokens(maxCompletionTokens) {
-        return new PromptMappingTextGenerationModel({
-            model: this.model.withMaxCompletionTokens(maxCompletionTokens),
-            promptMapping: this.promptMapping,
-        });
-    }
-    withStopTokens(stopTokens) {
-        return new PromptMappingTextGenerationModel({
-            model: this.model.withStopTokens(stopTokens),
-            promptMapping: this.promptMapping,
+            promptFormat: this.promptFormat,
         });
     }
 }
-exports.PromptMappingTextGenerationModel = PromptMappingTextGenerationModel;
package/prompt/{TextPromptMapping.cjs → TextPromptFormat.cjs}
RENAMED
@@ -1,10 +1,13 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.ChatToTextPromptMapping = exports.InstructionToTextPromptMapping = void 0;
+exports.TextChatPromptFormat = exports.TextInstructionPromptFormat = void 0;
 const validateChatPrompt_js_1 = require("./chat/validateChatPrompt.cjs");
-const InstructionToTextPromptMapping = () => ({
-    stopTokens: [],
-    map: (instruction) => {
+/**
+ * Formats an instruction prompt as a basic text prompt.
+ */
+const TextInstructionPromptFormat = () => ({
+    stopSequences: [],
+    format: (instruction) => {
         let text = "";
         if (instruction.system != null) {
             text += `${instruction.system}\n\n`;
@@ -16,15 +19,15 @@ const InstructionToTextPromptMapping = () => ({
         return text;
     },
 });
-exports.InstructionToTextPromptMapping = InstructionToTextPromptMapping;
+exports.TextInstructionPromptFormat = TextInstructionPromptFormat;
 /**
- * Maps a chat prompt to a text prompt.
+ * Formats a chat prompt as a basic text prompt.
  *
  * @param user The label of the user in the chat.
  * @param ai The name of the AI in the chat.
  */
-const ChatToTextPromptMapping = ({ user, ai }) => ({
-    map: (chatPrompt) => {
+const TextChatPromptFormat = ({ user, ai }) => ({
+    format: (chatPrompt) => {
         (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
         let text = "";
         for (let i = 0; i < chatPrompt.length; i++) {
@@ -53,6 +56,6 @@ const ChatToTextPromptMapping = ({ user, ai }) => ({
         text += `${ai}:\n`;
         return text;
     },
-    stopTokens: [`\n${user}:`],
+    stopSequences: [`\n${user}:`],
 });
-exports.ChatToTextPromptMapping = ChatToTextPromptMapping;
+exports.TextChatPromptFormat = TextChatPromptFormat;
package/prompt/TextPromptFormat.d.ts
ADDED
@@ -0,0 +1,17 @@
+import { PromptFormat } from "./PromptFormat.js";
+import { InstructionPrompt } from "./InstructionPrompt.js";
+import { ChatPrompt } from "./chat/ChatPrompt.js";
+/**
+ * Formats an instruction prompt as a basic text prompt.
+ */
+export declare const TextInstructionPromptFormat: () => PromptFormat<InstructionPrompt, string>;
+/**
+ * Formats a chat prompt as a basic text prompt.
+ *
+ * @param user The label of the user in the chat.
+ * @param ai The name of the AI in the chat.
+ */
+export declare const TextChatPromptFormat: ({ user, ai, }: {
+    user: string;
+    ai: string;
+}) => PromptFormat<ChatPrompt, string>;
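The `user`/`ai` labels do double duty: they tag the chat turns in the generated prompt text and define the stop sequence. For example (labels are arbitrary):

const format = TextChatPromptFormat({ user: "user", ai: "assistant" });
// The formatted prompt ends with "assistant:\n", and generation stops at
// "\nuser:" so the model cannot write the next user turn itself.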
package/prompt/{TextPromptMapping.js → TextPromptFormat.js}
RENAMED
@@ -1,7 +1,10 @@
 import { validateChatPrompt } from "./chat/validateChatPrompt.js";
-export const InstructionToTextPromptMapping = () => ({
-    stopTokens: [],
-    map: (instruction) => {
+/**
+ * Formats an instruction prompt as a basic text prompt.
+ */
+export const TextInstructionPromptFormat = () => ({
+    stopSequences: [],
+    format: (instruction) => {
         let text = "";
         if (instruction.system != null) {
             text += `${instruction.system}\n\n`;
@@ -14,13 +17,13 @@ export const InstructionToTextPromptMapping = () => ({
     },
 });
 /**
- * Maps a chat prompt to a text prompt.
+ * Formats a chat prompt as a basic text prompt.
  *
  * @param user The label of the user in the chat.
  * @param ai The name of the AI in the chat.
  */
-export const ChatToTextPromptMapping = ({ user, ai }) => ({
-    map: (chatPrompt) => {
+export const TextChatPromptFormat = ({ user, ai }) => ({
+    format: (chatPrompt) => {
         validateChatPrompt(chatPrompt);
         let text = "";
         for (let i = 0; i < chatPrompt.length; i++) {
@@ -49,5 +52,5 @@ export const ChatToTextPromptMapping = ({ user, ai }) => ({
         text += `${ai}:\n`;
         return text;
     },
-    stopTokens: [`\n${user}:`],
+    stopSequences: [`\n${user}:`],
 });
package/prompt/{VicunaPromptMapping.cjs → VicunaPromptFormat.cjs}
RENAMED
@@ -1,10 +1,10 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.ChatToVicunaPromptMapping = void 0;
+exports.VicunaChatPromptFormat = void 0;
 const validateChatPrompt_js_1 = require("./chat/validateChatPrompt.cjs");
 const DEFAULT_SYSTEM_PROMPT = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.";
 /**
- * Maps a chat prompt to a Vicuna prompt.
+ * Formats a chat prompt as a Vicuna prompt.
  *
  * Overridding the system message in the first chat message can affect model respones.
  *
@@ -16,8 +16,8 @@ const DEFAULT_SYSTEM_PROMPT = "A chat between a curious user and an artificial i
  * ASSISTANT:
  * ```
  */
-const ChatToVicunaPromptMapping = () => ({
-    map: (chatPrompt) => {
+const VicunaChatPromptFormat = () => ({
+    format: (chatPrompt) => {
         (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
         let text = "";
         for (let i = 0; i < chatPrompt.length; i++) {
@@ -50,6 +50,6 @@ const ChatToVicunaPromptMapping = () => ({
         text += `ASSISTANT: `;
         return text;
     },
-    stopTokens: [`\nUSER:`],
+    stopSequences: [`\nUSER:`],
 });
-exports.ChatToVicunaPromptMapping = ChatToVicunaPromptMapping;
+exports.VicunaChatPromptFormat = VicunaChatPromptFormat;
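The Vicuna format follows the same pattern with fixed role labels. A short sketch of the effect:

const format = VicunaChatPromptFormat();
// The formatted prompt ends with "ASSISTANT: " and generation is cut off at
// "\nUSER:", per the stopSequences introduced above.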
package/prompt/{VicunaPromptMapping.d.ts → VicunaPromptFormat.d.ts}
RENAMED
@@ -1,7 +1,7 @@
-import { PromptMapping } from "./PromptMapping.js";
+import { PromptFormat } from "./PromptFormat.js";
 import { ChatPrompt } from "./chat/ChatPrompt.js";
 /**
- * Maps a chat prompt to a Vicuna prompt.
+ * Formats a chat prompt as a Vicuna prompt.
  *
  * Overridding the system message in the first chat message can affect model respones.
  *
@@ -13,4 +13,4 @@ import { ChatPrompt } from "./chat/ChatPrompt.js";
  * ASSISTANT:
  * ```
  */
-export declare const ChatToVicunaPromptMapping: () => PromptMapping<ChatPrompt, string>;
+export declare const VicunaChatPromptFormat: () => PromptFormat<ChatPrompt, string>;
package/prompt/{VicunaPromptMapping.js → VicunaPromptFormat.js}
RENAMED
@@ -1,7 +1,7 @@
 import { validateChatPrompt } from "./chat/validateChatPrompt.js";
 const DEFAULT_SYSTEM_PROMPT = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.";
 /**
- * Maps a chat prompt to a Vicuna prompt.
+ * Formats a chat prompt as a Vicuna prompt.
  *
  * Overridding the system message in the first chat message can affect model respones.
  *
@@ -13,8 +13,8 @@ const DEFAULT_SYSTEM_PROMPT = "A chat between a curious user and an artificial i
  * ASSISTANT:
  * ```
  */
-export const ChatToVicunaPromptMapping = () => ({
-    map: (chatPrompt) => {
+export const VicunaChatPromptFormat = () => ({
+    format: (chatPrompt) => {
         validateChatPrompt(chatPrompt);
         let text = "";
         for (let i = 0; i < chatPrompt.length; i++) {
@@ -47,5 +47,5 @@ export const ChatToVicunaPromptMapping = () => ({
         text += `ASSISTANT: `;
         return text;
     },
-    stopTokens: [`\nUSER:`],
+    stopSequences: [`\nUSER:`],
 });
package/prompt/chat/trimChatPrompt.cjs
CHANGED
@@ -10,10 +10,10 @@ const validateChatPrompt_js_1 = require("./validateChatPrompt.cjs");
  * When the minimal chat prompt (system message + last user message) is already too long, it will only
  * return this minimal chat prompt.
  *
- * @see https://modelfusion.dev/guide/function/generate-text/prompt-mapping#limiting-the-chat-length
+ * @see https://modelfusion.dev/guide/function/generate-text/prompt-format#limiting-the-chat-length
  */
 async function trimChatPrompt({ prompt, model, tokenLimit = model.contextWindowSize -
-    (model.maxCompletionTokens ?? model.contextWindowSize / 4), }) {
+    (model.settings.maxCompletionTokens ?? model.contextWindowSize / 4), }) {
     (0, validateChatPrompt_js_1.validateChatPrompt)(prompt);
     const startsWithSystemMessage = "system" in prompt[0];
     const systemMessage = startsWithSystemMessage ? [prompt[0]] : [];
package/prompt/chat/trimChatPrompt.d.ts
CHANGED
@@ -8,7 +8,7 @@ import { ChatPrompt } from "./ChatPrompt.js";
  * When the minimal chat prompt (system message + last user message) is already too long, it will only
  * return this minimal chat prompt.
  *
- * @see https://modelfusion.dev/guide/function/generate-text/prompt-mapping#limiting-the-chat-length
+ * @see https://modelfusion.dev/guide/function/generate-text/prompt-format#limiting-the-chat-length
  */
 export declare function trimChatPrompt({ prompt, model, tokenLimit, }: {
     prompt: ChatPrompt;
package/prompt/chat/trimChatPrompt.js
CHANGED
@@ -7,10 +7,10 @@ import { validateChatPrompt } from "./validateChatPrompt.js";
 * When the minimal chat prompt (system message + last user message) is already too long, it will only
 * return this minimal chat prompt.
 *
- * @see https://modelfusion.dev/guide/function/generate-text/prompt-mapping#limiting-the-chat-length
+ * @see https://modelfusion.dev/guide/function/generate-text/prompt-format#limiting-the-chat-length
 */
 export async function trimChatPrompt({ prompt, model, tokenLimit = model.contextWindowSize -
-    (model.maxCompletionTokens ?? model.contextWindowSize / 4), }) {
+    (model.settings.maxCompletionTokens ?? model.contextWindowSize / 4), }) {
     validateChatPrompt(prompt);
     const startsWithSystemMessage = "system" in prompt[0];
     const systemMessage = startsWithSystemMessage ? [prompt[0]] : [];