modelfusion 0.103.0 → 0.105.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +56 -0
- package/model-function/Delta.d.ts +1 -2
- package/model-function/executeStreamCall.cjs +6 -4
- package/model-function/executeStreamCall.d.ts +2 -2
- package/model-function/executeStreamCall.js +6 -4
- package/model-function/generate-speech/streamSpeech.cjs +1 -2
- package/model-function/generate-speech/streamSpeech.js +1 -2
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +25 -29
- package/model-function/generate-structure/StructureFromTextStreamingModel.d.ts +3 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +25 -29
- package/model-function/generate-structure/StructureGenerationModel.d.ts +2 -0
- package/model-function/generate-structure/streamStructure.cjs +7 -8
- package/model-function/generate-structure/streamStructure.d.ts +1 -1
- package/model-function/generate-structure/streamStructure.js +7 -8
- package/model-function/generate-text/PromptTemplateFullTextModel.cjs +35 -0
- package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +41 -0
- package/model-function/generate-text/PromptTemplateFullTextModel.js +31 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +3 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +2 -1
- package/model-function/generate-text/PromptTemplateTextStreamingModel.js +3 -0
- package/model-function/generate-text/TextGenerationModel.d.ts +2 -1
- package/model-function/generate-text/index.cjs +1 -0
- package/model-function/generate-text/index.d.ts +1 -0
- package/model-function/generate-text/index.js +1 -0
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +2 -1
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +2 -2
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +2 -1
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +9 -5
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +9 -5
- package/model-function/generate-text/prompt-template/ChatPrompt.cjs +38 -20
- package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +33 -34
- package/model-function/generate-text/prompt-template/ChatPrompt.js +37 -18
- package/model-function/generate-text/prompt-template/ContentPart.cjs +11 -0
- package/model-function/generate-text/prompt-template/ContentPart.d.ts +30 -0
- package/model-function/generate-text/prompt-template/ContentPart.js +7 -0
- package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +7 -22
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +40 -6
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +16 -4
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +38 -5
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +10 -5
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +10 -5
- package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +8 -5
- package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/TextPromptTemplate.js +8 -5
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +8 -4
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +2 -2
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +8 -4
- package/model-function/generate-text/prompt-template/index.cjs +1 -1
- package/model-function/generate-text/prompt-template/index.d.ts +1 -1
- package/model-function/generate-text/prompt-template/index.js +1 -1
- package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +0 -2
- package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts +4 -4
- package/model-function/generate-text/prompt-template/trimChatPrompt.js +0 -2
- package/model-function/generate-text/streamText.cjs +27 -28
- package/model-function/generate-text/streamText.d.ts +1 -0
- package/model-function/generate-text/streamText.js +27 -28
- package/model-provider/anthropic/AnthropicPromptTemplate.cjs +9 -4
- package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +4 -4
- package/model-provider/anthropic/AnthropicPromptTemplate.js +9 -4
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +8 -14
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +13 -4
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +8 -14
- package/model-provider/anthropic/AnthropicTextGenerationModel.test.cjs +44 -0
- package/model-provider/anthropic/AnthropicTextGenerationModel.test.js +42 -0
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -44
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +47 -13
- package/model-provider/cohere/CohereTextGenerationModel.js +7 -45
- package/model-provider/cohere/CohereTextGenerationModel.test.cjs +33 -0
- package/model-provider/cohere/CohereTextGenerationModel.test.js +31 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +1 -2
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +1 -2
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +29 -17
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -4
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +29 -17
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +7 -14
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +157 -6
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +8 -15
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.cjs +37 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.d.ts +1 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.js +35 -0
- package/model-provider/mistral/MistralChatModel.cjs +30 -104
- package/model-provider/mistral/MistralChatModel.d.ts +49 -16
- package/model-provider/mistral/MistralChatModel.js +30 -104
- package/model-provider/mistral/MistralChatModel.test.cjs +51 -0
- package/model-provider/mistral/MistralChatModel.test.d.ts +1 -0
- package/model-provider/mistral/MistralChatModel.test.js +49 -0
- package/model-provider/mistral/MistralPromptTemplate.cjs +13 -5
- package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
- package/model-provider/mistral/MistralPromptTemplate.js +13 -5
- package/model-provider/ollama/OllamaChatModel.cjs +7 -43
- package/model-provider/ollama/OllamaChatModel.d.ts +63 -11
- package/model-provider/ollama/OllamaChatModel.js +7 -43
- package/model-provider/ollama/OllamaChatModel.test.cjs +27 -0
- package/model-provider/ollama/OllamaChatModel.test.d.ts +1 -0
- package/model-provider/ollama/OllamaChatModel.test.js +25 -0
- package/model-provider/ollama/OllamaChatPromptTemplate.cjs +43 -17
- package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +4 -4
- package/model-provider/ollama/OllamaChatPromptTemplate.js +43 -17
- package/model-provider/ollama/OllamaCompletionModel.cjs +22 -43
- package/model-provider/ollama/OllamaCompletionModel.d.ts +65 -9
- package/model-provider/ollama/OllamaCompletionModel.js +23 -44
- package/model-provider/ollama/OllamaCompletionModel.test.cjs +101 -13
- package/model-provider/ollama/OllamaCompletionModel.test.js +78 -13
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.cjs → AbstractOpenAIChatModel.cjs} +71 -15
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.d.ts → AbstractOpenAIChatModel.d.ts} +273 -19
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.js → AbstractOpenAIChatModel.js} +71 -15
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.cjs → OpenAIChatFunctionCallStructureGenerationModel.cjs} +18 -2
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts → OpenAIChatFunctionCallStructureGenerationModel.d.ts} +41 -11
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.js → OpenAIChatFunctionCallStructureGenerationModel.js} +18 -2
- package/model-provider/openai/{chat/OpenAIChatMessage.d.ts → OpenAIChatMessage.d.ts} +3 -3
- package/model-provider/openai/{chat/OpenAIChatModel.cjs → OpenAIChatModel.cjs} +5 -5
- package/model-provider/openai/{chat/OpenAIChatModel.d.ts → OpenAIChatModel.d.ts} +12 -12
- package/model-provider/openai/{chat/OpenAIChatModel.js → OpenAIChatModel.js} +5 -5
- package/model-provider/openai/OpenAIChatModel.test.cjs +94 -0
- package/model-provider/openai/OpenAIChatModel.test.d.ts +1 -0
- package/model-provider/openai/OpenAIChatModel.test.js +92 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.cjs +114 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.d.ts +20 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.js +107 -0
- package/model-provider/openai/OpenAICompletionModel.cjs +32 -84
- package/model-provider/openai/OpenAICompletionModel.d.ts +29 -12
- package/model-provider/openai/OpenAICompletionModel.js +33 -85
- package/model-provider/openai/OpenAICompletionModel.test.cjs +53 -0
- package/model-provider/openai/OpenAICompletionModel.test.d.ts +1 -0
- package/model-provider/openai/OpenAICompletionModel.test.js +51 -0
- package/model-provider/openai/OpenAICostCalculator.cjs +1 -1
- package/model-provider/openai/OpenAICostCalculator.js +1 -1
- package/model-provider/openai/OpenAIFacade.cjs +2 -2
- package/model-provider/openai/OpenAIFacade.d.ts +3 -3
- package/model-provider/openai/OpenAIFacade.js +2 -2
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +6 -6
- package/model-provider/openai/TikTokenTokenizer.d.ts +1 -1
- package/model-provider/openai/{chat/countOpenAIChatMessageTokens.cjs → countOpenAIChatMessageTokens.cjs} +2 -2
- package/model-provider/openai/{chat/countOpenAIChatMessageTokens.js → countOpenAIChatMessageTokens.js} +2 -2
- package/model-provider/openai/index.cjs +6 -6
- package/model-provider/openai/index.d.ts +5 -6
- package/model-provider/openai/index.js +5 -5
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +4 -4
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +6 -6
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +4 -4
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/package.json +5 -5
- package/test/JsonTestServer.cjs +33 -0
- package/test/JsonTestServer.d.ts +7 -0
- package/test/JsonTestServer.js +29 -0
- package/test/StreamingTestServer.cjs +55 -0
- package/test/StreamingTestServer.d.ts +7 -0
- package/test/StreamingTestServer.js +51 -0
- package/test/arrayFromAsync.cjs +13 -0
- package/test/arrayFromAsync.d.ts +1 -0
- package/test/arrayFromAsync.js +9 -0
- package/util/streaming/createEventSourceResponseHandler.cjs +9 -0
- package/util/streaming/createEventSourceResponseHandler.d.ts +4 -0
- package/util/streaming/createEventSourceResponseHandler.js +5 -0
- package/util/streaming/createJsonStreamResponseHandler.cjs +9 -0
- package/util/streaming/createJsonStreamResponseHandler.d.ts +4 -0
- package/util/streaming/createJsonStreamResponseHandler.js +5 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.cjs +52 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.d.ts +6 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.js +48 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.cjs +21 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.d.ts +6 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.js +17 -0
- package/model-function/generate-text/prompt-template/Content.cjs +0 -2
- package/model-function/generate-text/prompt-template/Content.d.ts +0 -20
- package/model-provider/openai/chat/OpenAIChatModel.test.cjs +0 -61
- package/model-provider/openai/chat/OpenAIChatModel.test.js +0 -59
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs +0 -72
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +0 -20
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +0 -65
- package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +0 -156
- package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +0 -19
- package/model-provider/openai/chat/OpenAIChatStreamIterable.js +0 -152
- /package/{model-function/generate-text/prompt-template/Content.js → model-provider/anthropic/AnthropicTextGenerationModel.test.d.ts} +0 -0
- /package/model-provider/{openai/chat/OpenAIChatModel.test.d.ts → cohere/CohereTextGenerationModel.test.d.ts} +0 -0
- /package/model-provider/openai/{chat/OpenAIChatMessage.cjs → OpenAIChatMessage.cjs} +0 -0
- /package/model-provider/openai/{chat/OpenAIChatMessage.js → OpenAIChatMessage.js} +0 -0
- /package/model-provider/openai/{chat/countOpenAIChatMessageTokens.d.ts → countOpenAIChatMessageTokens.d.ts} +0 -0
package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js:

```diff
@@ -1,4 +1,5 @@
-import {
+import { validateContentIsString } from "./ContentPart.js";
+import { InvalidPromptError } from "./InvalidPromptError.js";
 // see https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/generation.py#L44
 const BEGIN_SEGMENT = "<s>";
 const END_SEGMENT = " </s>";
@@ -43,9 +44,10 @@ export function instruction() {
     return {
         stopSequences: [END_SEGMENT],
         format(prompt) {
+            const instruction = validateContentIsString(prompt.instruction, prompt);
             return `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${prompt.system != null
                 ? `${BEGIN_SYSTEM}${prompt.system}${END_SYSTEM}`
-                : ""}${
+                : ""}${instruction}${END_INSTRUCTION}${prompt.responsePrefix ?? ""}`;
         },
     };
 }
@@ -64,7 +66,7 @@ export function instruction() {
 export function chat() {
     return {
         format(prompt) {
-
+            validateLlama2Prompt(prompt);
             let text = prompt.system != null
                 ? // Separate section for system message to simplify implementation
                 // (this is slightly different from the original instructions):
@@ -73,13 +75,17 @@ export function chat() {
             for (const { role, content } of prompt.messages) {
                 switch (role) {
                     case "user": {
-
+                        const textContent = validateContentIsString(content, prompt);
+                        text += `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${textContent}${END_INSTRUCTION}`;
                         break;
                     }
                     case "assistant": {
-                        text += `${content}${END_SEGMENT}`;
+                        text += `${validateContentIsString(content, prompt)}${END_SEGMENT}`;
                         break;
                     }
+                    case "tool": {
+                        throw new InvalidPromptError("Tool messages are not supported.", prompt);
+                    }
                     default: {
                         const _exhaustiveCheck = role;
                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
@@ -91,3 +97,30 @@ export function chat() {
         stopSequences: [END_SEGMENT],
     };
 }
+/**
+ * Checks if a Llama2 chat prompt is valid. Throws a {@link ChatPromptValidationError} if it's not.
+ *
+ * - The first message of the chat must be a user message.
+ * - Then it must be alternating between an assistant message and a user message.
+ * - The last message must always be a user message (when submitting to a model).
+ *
+ * The type checking is done at runtime when you submit a chat prompt to a model with a prompt template.
+ *
+ * @throws {@link ChatPromptValidationError}
+ */
+export function validateLlama2Prompt(chatPrompt) {
+    const messages = chatPrompt.messages;
+    if (messages.length < 1) {
+        throw new InvalidPromptError("ChatPrompt should have at least one message.", chatPrompt);
+    }
+    for (let i = 0; i < messages.length; i++) {
+        const expectedRole = i % 2 === 0 ? "user" : "assistant";
+        const role = messages[i].role;
+        if (role !== expectedRole) {
+            throw new InvalidPromptError(`Message at index ${i} should have role '${expectedRole}', but has role '${role}'.`, chatPrompt);
+        }
+    }
+    if (messages.length % 2 === 0) {
+        throw new InvalidPromptError("The last message must be a user message.", chatPrompt);
+    }
+}
```
package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs:

```diff
@@ -1,7 +1,8 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.chat = exports.instruction = exports.text = void 0;
-const
+const ContentPart_js_1 = require("./ContentPart.cjs");
+const InvalidPromptError_js_1 = require("./InvalidPromptError.cjs");
 const roleNames = {
     system: "System",
     user: "User",
@@ -36,8 +37,9 @@ exports.text = text;
 const instruction = () => ({
     stopSequences: [],
     format(prompt) {
+        const instruction = (0, ContentPart_js_1.validateContentIsString)(prompt.instruction, prompt);
         return (segment("system", prompt.system) +
-            segment("user",
+            segment("user", instruction) +
             segmentStart("assistant") +
             (prompt.responsePrefix ?? ""));
     },
@@ -53,18 +55,21 @@ exports.instruction = instruction;
 function chat() {
     return {
         format(prompt) {
-            (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
             let text = prompt.system != null ? segment("system", prompt.system) : "";
             for (const { role, content } of prompt.messages) {
                 switch (role) {
                     case "user": {
-
+                        const textContent = (0, ContentPart_js_1.validateContentIsString)(content, prompt);
+                        text += segment("user", textContent);
                         break;
                     }
                     case "assistant": {
-                        text += segment("assistant", content);
+                        text += segment("assistant", (0, ContentPart_js_1.validateContentIsString)(content, prompt));
                         break;
                     }
+                    case "tool": {
+                        throw new InvalidPromptError_js_1.InvalidPromptError("Tool messages are not supported.", prompt);
+                    }
                     default: {
                         const _exhaustiveCheck = role;
                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
```
package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts:

```diff
@@ -1,6 +1,6 @@
 import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
-import {
-import {
+import { ChatPrompt } from "./ChatPrompt.js";
+import { InstructionPrompt } from "./InstructionPrompt.js";
 /**
  * Formats a text prompt as a neural chat prompt.
  *
@@ -12,7 +12,7 @@ export declare function text(): TextGenerationPromptTemplate<string, string>;
  *
  * @see https://huggingface.co/Intel/neural-chat-7b-v3-1#prompt-template
  */
-export declare const instruction: () => TextGenerationPromptTemplate<
+export declare const instruction: () => TextGenerationPromptTemplate<InstructionPrompt, string>;
 /**
  * Formats a chat prompt as a basic text prompt.
  *
@@ -20,4 +20,4 @@ export declare const instruction: () => TextGenerationPromptTemplate<TextInstruc
  * @param assistant The label of the assistant in the chat. Default to "assistant".
  * @param system The label of the system in the chat. Optional, defaults to no prefix.
  */
-export declare function chat(): TextGenerationPromptTemplate<
+export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, string>;
```
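The declaration changes above retype `instruction()` and `chat()` over the shared `InstructionPrompt` and `ChatPrompt` types. A sketch of the shapes those types must have, reconstructed from the fields the `format()` implementations in this diff actually read — the authoritative declarations live in `InstructionPrompt.d.ts` and `ChatPrompt.d.ts`, which this diff does not show:

```ts
// Reconstructed sketch, not the package's actual declarations.
interface InstructionPromptSketch {
  system?: string;
  // Non-string (multi-part) content is rejected by validateContentIsString
  // in these text-only templates.
  instruction: string | unknown[];
  responsePrefix?: string;
}

interface ChatPromptSketch {
  system?: string;
  messages: Array<{
    role: "user" | "assistant" | "tool";
    content: string | unknown[];
  }>;
}
```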
package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js:

```diff
@@ -1,4 +1,5 @@
-import {
+import { validateContentIsString } from "./ContentPart.js";
+import { InvalidPromptError } from "./InvalidPromptError.js";
 const roleNames = {
     system: "System",
     user: "User",
@@ -32,8 +33,9 @@ export function text() {
 export const instruction = () => ({
     stopSequences: [],
     format(prompt) {
+        const instruction = validateContentIsString(prompt.instruction, prompt);
         return (segment("system", prompt.system) +
-            segment("user",
+            segment("user", instruction) +
             segmentStart("assistant") +
             (prompt.responsePrefix ?? ""));
     },
@@ -48,18 +50,21 @@ export const instruction = () => ({
 export function chat() {
     return {
         format(prompt) {
-            validateChatPrompt(prompt);
             let text = prompt.system != null ? segment("system", prompt.system) : "";
             for (const { role, content } of prompt.messages) {
                 switch (role) {
                     case "user": {
-
+                        const textContent = validateContentIsString(content, prompt);
+                        text += segment("user", textContent);
                         break;
                     }
                     case "assistant": {
-                        text += segment("assistant", content);
+                        text += segment("assistant", validateContentIsString(content, prompt));
                         break;
                     }
+                    case "tool": {
+                        throw new InvalidPromptError("Tool messages are not supported.", prompt);
+                    }
                     default: {
                         const _exhaustiveCheck = role;
                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
```
package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs:

```diff
@@ -1,7 +1,8 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.chat = exports.instruction = exports.text = void 0;
-const
+const ContentPart_js_1 = require("./ContentPart.cjs");
+const InvalidPromptError_js_1 = require("./InvalidPromptError.cjs");
 /**
  * Formats a text prompt as a basic text prompt. Does not change the text prompt in any way.
  */
@@ -20,7 +21,7 @@ const instruction = () => ({
         if (prompt.system != null) {
            text += `${prompt.system}\n\n`;
         }
-        text += `${prompt.instruction}\n\n`;
+        text += `${(0, ContentPart_js_1.validateContentIsString)(prompt.instruction, prompt)}\n\n`;
         if (prompt.responsePrefix != null) {
             text += prompt.responsePrefix;
         }
@@ -37,20 +38,22 @@ exports.instruction = instruction;
  */
 const chat = ({ user = "user", assistant = "assistant", system, } = {}) => ({
     format(prompt) {
-        (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
         let text = prompt.system != null
             ? `${system != null ? `${system}:` : ""}${prompt.system}\n\n`
             : "";
         for (const { role, content } of prompt.messages) {
             switch (role) {
                 case "user": {
-                    text += `${user}:\n${content}\n\n`;
+                    text += `${user}:\n${(0, ContentPart_js_1.validateContentIsString)(content, prompt)}\n\n`;
                     break;
                 }
                 case "assistant": {
-                    text += `${assistant}:\n${content}\n\n`;
+                    text += `${assistant}:\n${(0, ContentPart_js_1.validateContentIsString)(content, prompt)}\n\n`;
                     break;
                 }
+                case "tool": {
+                    throw new InvalidPromptError_js_1.InvalidPromptError("Tool messages are not supported.", prompt);
+                }
                 default: {
                     const _exhaustiveCheck = role;
                     throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
```
package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts:

```diff
@@ -1,6 +1,6 @@
 import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
-import {
-import {
+import { ChatPrompt } from "./ChatPrompt.js";
+import { InstructionPrompt } from "./InstructionPrompt.js";
 /**
  * Formats a text prompt as a basic text prompt. Does not change the text prompt in any way.
  */
@@ -8,7 +8,7 @@ export declare const text: () => TextGenerationPromptTemplate<string, string>;
 /**
  * Formats an instruction prompt as a basic text prompt.
  */
-export declare const instruction: () => TextGenerationPromptTemplate<
+export declare const instruction: () => TextGenerationPromptTemplate<InstructionPrompt, string>;
 /**
  * Formats a chat prompt as a basic text prompt.
  *
@@ -20,4 +20,4 @@ export declare const chat: (options?: {
     user?: string;
     assistant?: string;
     system?: string;
-}) => TextGenerationPromptTemplate<
+}) => TextGenerationPromptTemplate<ChatPrompt, string>;
```
package/model-function/generate-text/prompt-template/TextPromptTemplate.js:

```diff
@@ -1,4 +1,5 @@
-import {
+import { validateContentIsString } from "./ContentPart.js";
+import { InvalidPromptError } from "./InvalidPromptError.js";
 /**
  * Formats a text prompt as a basic text prompt. Does not change the text prompt in any way.
  */
@@ -16,7 +17,7 @@ export const instruction = () => ({
         if (prompt.system != null) {
             text += `${prompt.system}\n\n`;
         }
-        text += `${prompt.instruction}\n\n`;
+        text += `${validateContentIsString(prompt.instruction, prompt)}\n\n`;
         if (prompt.responsePrefix != null) {
             text += prompt.responsePrefix;
         }
@@ -32,20 +33,22 @@ export const instruction = () => ({
  */
 export const chat = ({ user = "user", assistant = "assistant", system, } = {}) => ({
     format(prompt) {
-        validateChatPrompt(prompt);
         let text = prompt.system != null
             ? `${system != null ? `${system}:` : ""}${prompt.system}\n\n`
             : "";
         for (const { role, content } of prompt.messages) {
             switch (role) {
                 case "user": {
-                    text += `${user}:\n${content}\n\n`;
+                    text += `${user}:\n${validateContentIsString(content, prompt)}\n\n`;
                     break;
                 }
                 case "assistant": {
-                    text += `${assistant}:\n${content}\n\n`;
+                    text += `${assistant}:\n${validateContentIsString(content, prompt)}\n\n`;
                     break;
                 }
+                case "tool": {
+                    throw new InvalidPromptError("Tool messages are not supported.", prompt);
+                }
                 default: {
                     const _exhaustiveCheck = role;
                     throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
```
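The plain-text chat template keeps its configurable role labels; only the content handling changed. A usage sketch with custom labels, assuming the `TextPrompt` namespace (seen in the index diffs below) is re-exported from the package root:

```ts
import { TextPrompt } from "modelfusion";

const template = TextPrompt.chat({ user: "Q", assistant: "A" });

// Each message renders as "<label>:\n<content>\n\n":
// "Q:\nWhat is the capital of France?\n\nA:\nParis.\n\nQ:\nAnd of Italy?\n\n"
// (plus whatever assistant prefix the template appends after the loop,
// which this diff does not show).
const text = template.format({
  messages: [
    { role: "user", content: "What is the capital of France?" },
    { role: "assistant", content: "Paris." },
    { role: "user", content: "And of Italy?" },
  ],
});
```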
package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs:

```diff
@@ -1,7 +1,8 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.chat = void 0;
-const
+const ContentPart_js_1 = require("./ContentPart.cjs");
+const InvalidPromptError_js_1 = require("./InvalidPromptError.cjs");
 // default Vicuna 1 system message
 const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
     "The assistant gives helpful, detailed, and polite answers to the user's questions.";
@@ -21,20 +22,23 @@ const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial
 function chat() {
     return {
         format(prompt) {
-            (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
             let text = prompt.system != null
                 ? `${prompt.system}\n\n`
                 : `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
             for (const { role, content } of prompt.messages) {
                 switch (role) {
                     case "user": {
-
+                        const textContent = (0, ContentPart_js_1.validateContentIsString)(content, prompt);
+                        text += `USER: ${textContent}\n`;
                         break;
                     }
                     case "assistant": {
-                        text += `ASSISTANT: ${content}\n`;
+                        text += `ASSISTANT: ${(0, ContentPart_js_1.validateContentIsString)(content, prompt)}\n`;
                         break;
                     }
+                    case "tool": {
+                        throw new InvalidPromptError_js_1.InvalidPromptError("Tool messages are not supported.", prompt);
+                    }
                     default: {
                         const _exhaustiveCheck = role;
                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
```
package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts:

```diff
@@ -1,5 +1,5 @@
 import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
-import {
+import { ChatPrompt } from "./ChatPrompt.js";
 /**
  * Formats a chat prompt as a Vicuna prompt.
  *
@@ -13,4 +13,4 @@ import { TextChatPrompt } from "./ChatPrompt.js";
  * ASSISTANT:
  * ```
  */
-export declare function chat(): TextGenerationPromptTemplate<
+export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, string>;
```
package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js:

```diff
@@ -1,4 +1,5 @@
-import {
+import { validateContentIsString } from "./ContentPart.js";
+import { InvalidPromptError } from "./InvalidPromptError.js";
 // default Vicuna 1 system message
 const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
     "The assistant gives helpful, detailed, and polite answers to the user's questions.";
@@ -18,20 +19,23 @@ const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial
 export function chat() {
     return {
         format(prompt) {
-            validateChatPrompt(prompt);
             let text = prompt.system != null
                 ? `${prompt.system}\n\n`
                 : `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
             for (const { role, content } of prompt.messages) {
                 switch (role) {
                     case "user": {
-
+                        const textContent = validateContentIsString(content, prompt);
+                        text += `USER: ${textContent}\n`;
                         break;
                     }
                     case "assistant": {
-                        text += `ASSISTANT: ${content}\n`;
+                        text += `ASSISTANT: ${validateContentIsString(content, prompt)}\n`;
                         break;
                     }
+                    case "tool": {
+                        throw new InvalidPromptError("Tool messages are not supported.", prompt);
+                    }
                     default: {
                         const _exhaustiveCheck = role;
                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
```
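Across all of these text-only templates (Llama2, NeuralChat, Text, Vicuna), messages with role `tool` now fail fast with an `InvalidPromptError` instead of falling through. Sketched for Vicuna, assuming the `VicunaPrompt` namespace reaches the root export:

```ts
import { VicunaPrompt } from "modelfusion";

const template = VicunaPrompt.chat();

// Renders as "USER: …\n" / "ASSISTANT: …\n" lines under the system message.
template.format({
  messages: [
    { role: "user", content: "Hi" },
    { role: "assistant", content: "Hello!" },
    { role: "user", content: "How are you?" },
  ],
});

// Now throws InvalidPromptError("Tool messages are not supported.", …):
template.format({
  messages: [{ role: "tool", content: "…" }],
});
```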
package/model-function/generate-text/prompt-template/index.cjs:

```diff
@@ -30,7 +30,7 @@ exports.VicunaPrompt = exports.TextPrompt = exports.NeuralChatPrompt = exports.L
 exports.AlpacaPrompt = __importStar(require("./AlpacaPromptTemplate.cjs"));
 exports.ChatMLPrompt = __importStar(require("./ChatMLPromptTemplate.cjs"));
 __exportStar(require("./ChatPrompt.cjs"), exports);
-__exportStar(require("./
+__exportStar(require("./ContentPart.cjs"), exports);
 __exportStar(require("./InstructionPrompt.cjs"), exports);
 __exportStar(require("./InvalidPromptError.cjs"), exports);
 exports.Llama2Prompt = __importStar(require("./Llama2PromptTemplate.cjs"));
```
package/model-function/generate-text/prompt-template/index.d.ts:

```diff
@@ -1,7 +1,7 @@
 export * as AlpacaPrompt from "./AlpacaPromptTemplate.js";
 export * as ChatMLPrompt from "./ChatMLPromptTemplate.js";
 export * from "./ChatPrompt.js";
-export * from "./
+export * from "./ContentPart.js";
 export * from "./InstructionPrompt.js";
 export * from "./InvalidPromptError.js";
 export * as Llama2Prompt from "./Llama2PromptTemplate.js";
```
package/model-function/generate-text/prompt-template/index.js:

```diff
@@ -1,7 +1,7 @@
 export * as AlpacaPrompt from "./AlpacaPromptTemplate.js";
 export * as ChatMLPrompt from "./ChatMLPromptTemplate.js";
 export * from "./ChatPrompt.js";
-export * from "./
+export * from "./ContentPart.js";
 export * from "./InstructionPrompt.js";
 export * from "./InvalidPromptError.js";
 export * as Llama2Prompt from "./Llama2PromptTemplate.js";
```
package/model-function/generate-text/prompt-template/trimChatPrompt.cjs:

```diff
@@ -1,7 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.trimChatPrompt = void 0;
-const ChatPrompt_js_1 = require("./ChatPrompt.cjs");
 /**
  * Keeps only the most recent messages in the prompt, while leaving enough space for the completion.
  *
@@ -14,7 +13,6 @@ const ChatPrompt_js_1 = require("./ChatPrompt.cjs");
  */
 async function trimChatPrompt({ prompt, model, tokenLimit = model.contextWindowSize -
     (model.settings.maxGenerationTokens ?? model.contextWindowSize / 4), }) {
-    (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
     let minimalPrompt = {
         system: prompt.system,
         messages: [prompt.messages[prompt.messages.length - 1]], // last user message
```
package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts:

```diff
@@ -1,5 +1,5 @@
 import { HasContextWindowSize, HasTokenizer, TextGenerationModel, TextGenerationModelSettings } from "../TextGenerationModel.js";
-import {
+import { ChatPrompt } from "./ChatPrompt.js";
 /**
  * Keeps only the most recent messages in the prompt, while leaving enough space for the completion.
  *
@@ -11,7 +11,7 @@ import { TextChatPrompt } from "./ChatPrompt.js";
  * @see https://modelfusion.dev/guide/function/generate-text#limiting-the-chat-length
  */
 export declare function trimChatPrompt({ prompt, model, tokenLimit, }: {
-    prompt:
-    model: TextGenerationModel<
+    prompt: ChatPrompt;
+    model: TextGenerationModel<ChatPrompt, TextGenerationModelSettings> & HasTokenizer<ChatPrompt> & HasContextWindowSize;
     tokenLimit?: number;
-}): Promise<
+}): Promise<ChatPrompt>;
```
package/model-function/generate-text/prompt-template/trimChatPrompt.js:

```diff
@@ -1,4 +1,3 @@
-import { validateChatPrompt } from "./ChatPrompt.js";
 /**
  * Keeps only the most recent messages in the prompt, while leaving enough space for the completion.
  *
@@ -11,7 +10,6 @@ import { validateChatPrompt } from "./ChatPrompt.js";
  */
 export async function trimChatPrompt({ prompt, model, tokenLimit = model.contextWindowSize -
     (model.settings.maxGenerationTokens ?? model.contextWindowSize / 4), }) {
-    validateChatPrompt(prompt);
     let minimalPrompt = {
         system: prompt.system,
         messages: [prompt.messages[prompt.messages.length - 1]], // last user message
```
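`trimChatPrompt` no longer validates the chat prompt itself; it relies on the template-level checks above and otherwise keeps its signature (per the `.d.ts` diff). A usage sketch, assuming the function is re-exported from the package root — the `model` value here is a placeholder:

```ts
import { trimChatPrompt } from "modelfusion";

// `model` must provide a tokenizer and a context window size
// (TextGenerationModel & HasTokenizer<ChatPrompt> & HasContextWindowSize).
declare const model: Parameters<typeof trimChatPrompt>[0]["model"];

const trimmed = await trimChatPrompt({
  prompt: {
    system: "You are a helpful assistant.",
    messages: [
      // … long user/assistant history …
      { role: "user", content: "Latest question" },
    ],
  },
  model,
  // tokenLimit defaults to:
  // model.contextWindowSize - (model.settings.maxGenerationTokens ?? model.contextWindowSize / 4)
});
// `trimmed` keeps the system message plus the most recent messages that fit,
// always including the last user message.
```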
package/model-function/generate-text/streamText.cjs:

```diff
@@ -5,9 +5,12 @@ const executeStreamCall_js_1 = require("../executeStreamCall.cjs");
 async function streamText(model, prompt, options) {
     const shouldTrimWhitespace = model.settings.trimWhitespace ?? true;
     let accumulatedText = "";
-    let lastFullDelta;
     let isFirstDelta = true;
     let trailingWhitespace = "";
+    let resolveText;
+    const textPromise = new Promise((resolve) => {
+        resolveText = resolve;
+    });
     const fullResponse = await (0, executeStreamCall_js_1.executeStreamCall)({
         functionType: "stream-text",
         input: prompt,
@@ -15,39 +18,35 @@ async function streamText(model, prompt, options) {
         options,
         startStream: async (options) => model.doStreamText(prompt, options),
         processDelta: (delta) => {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    : "";
-                textDelta = textDelta.trimEnd();
-            }
-            isFirstDelta = false;
-            accumulatedText += textDelta;
-            return textDelta;
+            let textDelta = model.extractTextDelta(delta.deltaValue);
+            if (textDelta == null || textDelta.length === 0) {
+                return undefined;
+            }
+            if (shouldTrimWhitespace) {
+                textDelta = isFirstDelta
+                    ? // remove leading whitespace:
+                        textDelta.trimStart()
+                    : // restore trailing whitespace from previous chunk:
+                        trailingWhitespace + textDelta;
+                // trim trailing whitespace and store it for the next chunk:
+                const trailingWhitespaceMatch = textDelta.match(/\s+$/);
+                trailingWhitespace = trailingWhitespaceMatch
+                    ? trailingWhitespaceMatch[0]
+                    : "";
+                textDelta = textDelta.trimEnd();
             }
-
+            isFirstDelta = false;
+            accumulatedText += textDelta;
+            return textDelta;
+        },
+        onDone: () => {
+            resolveText(accumulatedText);
         },
-        getResult: () => ({
-            response: lastFullDelta,
-            value: accumulatedText,
-        }),
     });
     return options?.fullResponse
         ? {
             textStream: fullResponse.value,
+            text: textPromise,
             metadata: fullResponse.metadata,
         }
         : fullResponse.value;
```
package/model-function/generate-text/streamText.d.ts:

```diff
@@ -33,5 +33,6 @@ export declare function streamText<PROMPT>(model: TextStreamingModel<PROMPT>, pr
     fullResponse: true;
 }): Promise<{
     textStream: AsyncIterable<string>;
+    text: PromiseLike<string>;
     metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
 }>;
```
package/model-function/generate-text/streamText.js:

```diff
@@ -2,9 +2,12 @@ import { executeStreamCall } from "../executeStreamCall.js";
 export async function streamText(model, prompt, options) {
     const shouldTrimWhitespace = model.settings.trimWhitespace ?? true;
     let accumulatedText = "";
-    let lastFullDelta;
     let isFirstDelta = true;
     let trailingWhitespace = "";
+    let resolveText;
+    const textPromise = new Promise((resolve) => {
+        resolveText = resolve;
+    });
     const fullResponse = await executeStreamCall({
         functionType: "stream-text",
         input: prompt,
@@ -12,39 +15,35 @@ export async function streamText(model, prompt, options) {
         options,
         startStream: async (options) => model.doStreamText(prompt, options),
         processDelta: (delta) => {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    : "";
-                textDelta = textDelta.trimEnd();
-            }
-            isFirstDelta = false;
-            accumulatedText += textDelta;
-            return textDelta;
+            let textDelta = model.extractTextDelta(delta.deltaValue);
+            if (textDelta == null || textDelta.length === 0) {
+                return undefined;
+            }
+            if (shouldTrimWhitespace) {
+                textDelta = isFirstDelta
+                    ? // remove leading whitespace:
+                        textDelta.trimStart()
+                    : // restore trailing whitespace from previous chunk:
+                        trailingWhitespace + textDelta;
+                // trim trailing whitespace and store it for the next chunk:
+                const trailingWhitespaceMatch = textDelta.match(/\s+$/);
+                trailingWhitespace = trailingWhitespaceMatch
+                    ? trailingWhitespaceMatch[0]
+                    : "";
+                textDelta = textDelta.trimEnd();
             }
-
+            isFirstDelta = false;
+            accumulatedText += textDelta;
+            return textDelta;
+        },
+        onDone: () => {
+            resolveText(accumulatedText);
         },
-        getResult: () => ({
-            response: lastFullDelta,
-            value: accumulatedText,
-        }),
     });
     return options?.fullResponse
         ? {
             textStream: fullResponse.value,
+            text: textPromise,
             metadata: fullResponse.metadata,
         }
         : fullResponse.value;
```
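The streamText rewrite drops the unused `lastFullDelta`/`getResult` plumbing: deltas are now pulled through `model.extractTextDelta`, and a new `onDone` callback resolves a `text` promise that the `fullResponse: true` return exposes (see the `.d.ts` hunk above). A usage sketch, assuming `streamText` and the `TextStreamingModel` type are re-exported at the package root:

```ts
import { streamText, type TextStreamingModel } from "modelfusion";

async function run(model: TextStreamingModel<string>, prompt: string) {
  const { textStream, text, metadata } = await streamText(model, prompt, {
    fullResponse: true,
  });

  // Consume the whitespace-trimmed deltas as they arrive …
  for await (const delta of textStream) {
    process.stdout.write(delta);
  }

  // … then await the accumulated text, resolved by onDone when streaming ends.
  const fullText = await text;
  console.log("\n\ncharacters:", fullText.length, metadata);
}
```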