modelfusion 0.116.1 → 0.118.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +67 -0
- package/README.md +14 -11
- package/core/getFunctionCallLogger.cjs +6 -6
- package/core/getFunctionCallLogger.js +6 -6
- package/model-function/ModelCallEvent.d.ts +1 -1
- package/model-function/embed/EmbeddingEvent.d.ts +1 -1
- package/model-function/embed/EmbeddingModel.d.ts +1 -1
- package/model-function/embed/embed.cjs +5 -5
- package/model-function/embed/embed.d.ts +2 -2
- package/model-function/embed/embed.js +5 -5
- package/model-function/executeStandardCall.cjs +3 -3
- package/model-function/executeStandardCall.d.ts +2 -2
- package/model-function/executeStandardCall.js +3 -3
- package/model-function/generate-image/ImageGenerationEvent.d.ts +1 -1
- package/model-function/generate-image/ImageGenerationModel.d.ts +1 -1
- package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +1 -1
- package/model-function/generate-image/generateImage.cjs +2 -2
- package/model-function/generate-image/generateImage.d.ts +1 -1
- package/model-function/generate-image/generateImage.js +2 -2
- package/model-function/generate-speech/SpeechGenerationEvent.d.ts +1 -1
- package/model-function/generate-speech/generateSpeech.cjs +2 -2
- package/model-function/generate-speech/generateSpeech.d.ts +1 -1
- package/model-function/generate-speech/generateSpeech.js +2 -2
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +1 -1
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +1 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +1 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +1 -1
- package/model-function/generate-structure/StructureGenerationEvent.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.cjs +2 -2
- package/model-function/generate-structure/generateStructure.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.js +2 -2
- package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +2 -2
- package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +2 -2
- package/model-function/generate-text/TextGenerationEvent.d.ts +1 -1
- package/model-function/generate-text/TextGenerationModel.d.ts +2 -2
- package/model-function/generate-text/generateText.cjs +3 -3
- package/model-function/generate-text/generateText.d.ts +1 -1
- package/model-function/generate-text/generateText.js +3 -3
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +8 -1
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +5 -0
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +6 -0
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.cjs +2 -0
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.d.ts +8 -0
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.js +1 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +34 -1
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +9 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +31 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +28 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +29 -1
- package/model-function/generate-text/prompt-template/index.cjs +1 -0
- package/model-function/generate-text/prompt-template/index.d.ts +1 -0
- package/model-function/generate-text/prompt-template/index.js +1 -0
- package/model-function/generate-transcription/TranscriptionEvent.d.ts +1 -1
- package/model-function/generate-transcription/TranscriptionModel.d.ts +1 -1
- package/model-function/generate-transcription/generateTranscription.cjs +1 -1
- package/model-function/generate-transcription/generateTranscription.d.ts +1 -1
- package/model-function/generate-transcription/generateTranscription.js +1 -1
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +3 -3
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +3 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +3 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.js +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.cjs +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +4 -4
- package/model-provider/cohere/CohereTextGenerationModel.js +3 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +3 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +3 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +3 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -4
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +3 -3
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +15 -1
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -0
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +13 -0
- package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +37 -27
- package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +18 -8
- package/model-provider/llamacpp/LlamaCppCompletionModel.js +37 -27
- package/model-provider/llamacpp/LlamaCppFacade.cjs +31 -3
- package/model-provider/llamacpp/LlamaCppFacade.d.ts +6 -1
- package/model-provider/llamacpp/LlamaCppFacade.js +6 -1
- package/model-provider/llamacpp/LlamaCppGrammars.cjs +84 -0
- package/model-provider/llamacpp/LlamaCppGrammars.d.ts +18 -0
- package/model-provider/llamacpp/LlamaCppGrammars.js +81 -0
- package/model-provider/llamacpp/LlamaCppPrompt.cjs +59 -0
- package/model-provider/llamacpp/LlamaCppPrompt.d.ts +14 -0
- package/model-provider/llamacpp/LlamaCppPrompt.js +31 -0
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +3 -3
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +1 -1
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +3 -3
- package/model-provider/llamacpp/index.cjs +2 -3
- package/model-provider/llamacpp/index.d.ts +1 -2
- package/model-provider/llamacpp/index.js +1 -2
- package/model-provider/mistral/MistralChatModel.cjs +3 -3
- package/model-provider/mistral/MistralChatModel.d.ts +4 -4
- package/model-provider/mistral/MistralChatModel.js +3 -3
- package/model-provider/mistral/MistralTextEmbeddingModel.cjs +3 -3
- package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +1 -1
- package/model-provider/mistral/MistralTextEmbeddingModel.js +3 -3
- package/model-provider/ollama/OllamaChatModel.cjs +3 -3
- package/model-provider/ollama/OllamaChatModel.d.ts +2 -2
- package/model-provider/ollama/OllamaChatModel.js +3 -3
- package/model-provider/ollama/OllamaCompletionModel.cjs +3 -3
- package/model-provider/ollama/OllamaCompletionModel.d.ts +14 -14
- package/model-provider/ollama/OllamaCompletionModel.js +3 -3
- package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +3 -3
- package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +1 -1
- package/model-provider/ollama/OllamaTextEmbeddingModel.js +3 -3
- package/model-provider/openai/AbstractOpenAIChatModel.cjs +12 -12
- package/model-provider/openai/AbstractOpenAIChatModel.d.ts +6 -6
- package/model-provider/openai/AbstractOpenAIChatModel.js +12 -12
- package/model-provider/openai/AbstractOpenAICompletionModel.cjs +6 -6
- package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +2 -2
- package/model-provider/openai/AbstractOpenAICompletionModel.js +6 -6
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +3 -3
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.js +3 -3
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +3 -3
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.js +3 -3
- package/model-provider/openai/OpenAITranscriptionModel.cjs +3 -3
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.js +3 -3
- package/model-provider/stability/StabilityImageGenerationModel.cjs +3 -3
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.js +3 -3
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +3 -3
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +1 -1
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +3 -3
- package/package.json +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +2 -2
- package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.js +2 -2
- package/tool/generate-tool-call/ToolCallGenerationEvent.d.ts +1 -1
- package/tool/generate-tool-call/ToolCallGenerationModel.d.ts +1 -1
- package/tool/generate-tool-call/generateToolCall.cjs +2 -2
- package/tool/generate-tool-call/generateToolCall.js +2 -2
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +2 -2
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.d.ts +1 -1
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +2 -2
- package/tool/generate-tool-calls/ToolCallsGenerationEvent.d.ts +1 -1
- package/tool/generate-tool-calls/ToolCallsGenerationModel.d.ts +1 -1
- package/tool/generate-tool-calls/generateToolCalls.cjs +2 -2
- package/tool/generate-tool-calls/generateToolCalls.d.ts +1 -1
- package/tool/generate-tool-calls/generateToolCalls.js +2 -2
package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts

@@ -43,7 +43,7 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
     callAPI(prompt: string, callOptions: FunctionCallOptions): Promise<HuggingFaceTextGenerationResponse>;
     get settingsForEvent(): Partial<HuggingFaceTextGenerationModelSettings>;
     doGenerateTexts(prompt: string, options: FunctionCallOptions): Promise<{
-
+        rawResponse: {
             generated_text: string;
         }[];
         textGenerationResults: {

@@ -52,7 +52,7 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
         }[];
     }>;
     restoreGeneratedTexts(rawResponse: unknown): {
-
+        rawResponse: {
             generated_text: string;
         }[];
         textGenerationResults: {

@@ -60,8 +60,8 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
             finishReason: "unknown";
         }[];
     };
-    processTextGenerationResponse(
-
+    processTextGenerationResponse(rawResponse: HuggingFaceTextGenerationResponse): {
+        rawResponse: {
             generated_text: string;
         }[];
         textGenerationResults: {
package/model-provider/huggingface/HuggingFaceTextGenerationModel.js

@@ -113,10 +113,10 @@ export class HuggingFaceTextGenerationModel extends AbstractModel {
             schema: zodSchema(huggingFaceTextGenerationResponseSchema),
         }));
     }
-    processTextGenerationResponse(
+    processTextGenerationResponse(rawResponse) {
         return {
-
-            textGenerationResults:
+            rawResponse,
+            textGenerationResults: rawResponse.map((response) => ({
                 text: response.generated_text,
                 finishReason: "unknown",
             })),
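For context, a minimal caller-side sketch of the rename above: with `fullResponse: true`, the full result now exposes the raw provider payload as `rawResponse`. The positional `generateText(model, prompt, options)` call style and the `huggingface.TextGenerator` facade are assumptions about this release line, not part of this diff.

import { generateText, huggingface } from "modelfusion";

// Sketch under the assumptions above; the model id is a placeholder.
const { text, rawResponse } = await generateText(
  huggingface.TextGenerator({ model: "tiiuae/falcon-7b" }),
  "Write a one-line greeting:",
  { fullResponse: true }
);
// rawResponse holds the raw HuggingFace payload: [{ generated_text: "..." }]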
package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs

@@ -1,11 +1,25 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.chat = exports.instruction = void 0;
+exports.chat = exports.instruction = exports.text = void 0;
 const ContentPart_js_1 = require("../../model-function/generate-text/prompt-template/ContentPart.cjs");
 const InvalidPromptError_js_1 = require("../../model-function/generate-text/prompt-template/InvalidPromptError.cjs");
+const TextPromptTemplate_js_1 = require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs");
 // default Vicuna 1 system message
 const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
     "The assistant gives helpful, detailed, and polite answers to the user's questions.";
+/**
+ * Text prompt.
+ */
+function text() {
+    const delegate = (0, TextPromptTemplate_js_1.text)();
+    return {
+        stopSequences: [],
+        format(prompt) {
+            return { text: delegate.format(prompt) };
+        },
+    };
+}
+exports.text = text;
 /**
  * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
  *
package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts

@@ -2,6 +2,10 @@ import { TextGenerationPromptTemplate } from "../../model-function/generate-text
 import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
 import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
 import { LlamaCppCompletionPrompt } from "./LlamaCppCompletionModel.js";
+/**
+ * Text prompt.
+ */
+export declare function text(): TextGenerationPromptTemplate<string, LlamaCppCompletionPrompt>;
 /**
  * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
  *
package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js

@@ -1,8 +1,21 @@
 import { validateContentIsString } from "../../model-function/generate-text/prompt-template/ContentPart.js";
 import { InvalidPromptError } from "../../model-function/generate-text/prompt-template/InvalidPromptError.js";
+import { text as vicunaText } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
 // default Vicuna 1 system message
 const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
     "The assistant gives helpful, detailed, and polite answers to the user's questions.";
+/**
+ * Text prompt.
+ */
+export function text() {
+    const delegate = vicunaText();
+    return {
+        stopSequences: [],
+        format(prompt) {
+            return { text: delegate.format(prompt) };
+        },
+    };
+}
 /**
  * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
  *
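The new `text()` template above wraps a plain string prompt into the LlamaCpp prompt structure by delegating to the shared text prompt template. A hedged usage sketch; the `BakLLaVA1` name on `llamacpp.prompt` and the positional `streamText(model, prompt)` call style are assumptions (only the `Text` provider is visible in this diff):

import { llamacpp, streamText } from "modelfusion";

const model = llamacpp
  .CompletionTextGenerator({
    promptTemplate: llamacpp.prompt.BakLLaVA1, // assumed provider name
    maxGenerationTokens: 256,
  })
  .withTextPrompt(); // uses the provider's text() template shown above

const textStream = await streamText(model, "Describe GBNF grammars briefly.");
for await (const chunk of textStream) {
  process.stdout.write(chunk);
}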
package/model-provider/llamacpp/LlamaCppCompletionModel.cjs

@@ -8,12 +8,15 @@ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
 const validateTypes_js_1 = require("../../core/schema/validateTypes.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+const StructureFromTextStreamingModel_js_1 = require("../../model-function/generate-structure/StructureFromTextStreamingModel.cjs");
 const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
 const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
 const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
 const LlamaCppApiConfiguration_js_1 = require("./LlamaCppApiConfiguration.cjs");
 const LlamaCppError_js_1 = require("./LlamaCppError.cjs");
+const LlamaCppGrammars_js_1 = require("./LlamaCppGrammars.cjs");
+const LlamaCppPrompt_js_1 = require("./LlamaCppPrompt.cjs");
 const LlamaCppTokenizer_js_1 = require("./LlamaCppTokenizer.cjs");
 class LlamaCppCompletionModel extends AbstractModel_js_1.AbstractModel {
     constructor(settings = {}) {

@@ -139,23 +142,23 @@ class LlamaCppCompletionModel extends AbstractModel_js_1.AbstractModel {
             schema: (0, ZodSchema_js_1.zodSchema)(llamaCppTextGenerationResponseSchema),
         }));
     }
-    processTextGenerationResponse(
+    processTextGenerationResponse(rawResponse) {
         return {
-
+            rawResponse,
             textGenerationResults: [
                 {
-                    text:
-                    finishReason:
+                    text: rawResponse.content,
+                    finishReason: rawResponse.stopped_eos || rawResponse.stopped_word
                         ? "stop"
-                        :
+                        : rawResponse.stopped_limit
                             ? "length"
                             : "unknown",
                 },
             ],
             usage: {
-                promptTokens:
-                completionTokens:
-                totalTokens:
+                promptTokens: rawResponse.tokens_evaluated,
+                completionTokens: rawResponse.tokens_predicted,
+                totalTokens: rawResponse.tokens_evaluated + rawResponse.tokens_predicted,
             },
         };
     }

@@ -167,27 +170,34 @@ class LlamaCppCompletionModel extends AbstractModel_js_1.AbstractModel {
     extractTextDelta(delta) {
         return delta.content;
     }
+    asStructureGenerationModel(promptTemplate) {
+        return "adaptModel" in promptTemplate
+            ? new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+                model: promptTemplate.adaptModel(this),
+                template: promptTemplate,
+            })
+            : new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+                model: this,
+                template: promptTemplate,
+            });
+    }
+    withJsonOutput() {
+        // don't override the grammar if it's already set (to support more restrictive grammars)
+        return this.settings.grammar == null
+            ? this.withSettings({ grammar: LlamaCppGrammars_js_1.json })
+            : this;
+    }
+    get promptTemplateProvider() {
+        return this.settings.promptTemplate ?? LlamaCppPrompt_js_1.Text;
+    }
     withTextPrompt() {
-        return this.withPromptTemplate(
-            format(prompt) {
-                return { text: prompt };
-            },
-            stopSequences: [],
-        });
+        return this.withPromptTemplate(this.promptTemplateProvider.text());
     }
-
-
-
-
-        return
-            model: this.withTextPrompt().withSettings({
-                stopSequences: [
-                    ...(this.settings.stopSequences ?? []),
-                    ...promptTemplate.stopSequences,
-                ],
-            }),
-            promptTemplate,
-        });
+    withInstructionPrompt() {
+        return this.withPromptTemplate(this.promptTemplateProvider.instruction());
+    }
+    withChatPrompt() {
+        return this.withPromptTemplate(this.promptTemplateProvider.chat());
     }
     /**
      * Maps the prompt for the full Llama.cpp prompt template (incl. image support).
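`withJsonOutput()` above only installs the bundled JSON grammar when no grammar is configured, so a stricter grammar set by the caller wins. A small sketch of that behavior (facade and grammar names taken from this diff):

import { llamacpp } from "modelfusion";

// Falls back to the bundled JSON grammar:
const jsonModel = llamacpp
  .CompletionTextGenerator({ maxGenerationTokens: 512 })
  .withJsonOutput(); // settings.grammar is now llamacpp.grammar.json

// Keeps a more restrictive grammar that is already set:
const arrayModel = llamacpp
  .CompletionTextGenerator({ grammar: llamacpp.grammar.jsonArray })
  .withJsonOutput(); // settings.grammar stays jsonArray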
package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts

@@ -4,9 +4,14 @@ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { Delta } from "../../model-function/Delta.js";
+import { FlexibleStructureFromTextPromptTemplate, StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
+import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
+import { TextGenerationPromptTemplateProvider } from "../../model-function/generate-text/prompt-template/PromptTemplateProvider.js";
 import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
 export interface LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE extends number | undefined> extends TextGenerationModelSettings {
     api?: ApiConfiguration;

@@ -121,6 +126,10 @@ export interface LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE extends num
      * If is -1 the task will be assigned to a Idle slot (default: -1)
      */
     slotId?: number;
+    /**
+     * Prompt template provider that is used when calling `.withTextPrompt()`, `withInstructionPrompt()` or `withChatPrompt()`.
+     */
+    promptTemplate?: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
 }
 export interface LlamaCppCompletionPrompt {
     /**
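The new `promptTemplate` setting selects the provider that backs `withTextPrompt()`, `withInstructionPrompt()`, and `withChatPrompt()`; the model falls back to the plain `Text` provider when it is unset. A configuration sketch; the `Llama2` provider name is an assumption, as only `Text` appears in this diff:

import { llamacpp } from "modelfusion";

const model = llamacpp
  .CompletionTextGenerator({
    promptTemplate: llamacpp.prompt.Llama2, // assumed provider name
    contextWindowSize: 4096,
    maxGenerationTokens: 512,
  })
  .withInstructionPrompt(); // prompts are formatted by the configured provider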
@@ -144,7 +153,7 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
     get settingsForEvent(): Partial<LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>>;
     countPromptTokens(prompt: LlamaCppCompletionPrompt): Promise<number>;
     doGenerateTexts(prompt: LlamaCppCompletionPrompt, options: FunctionCallOptions): Promise<{
-
+        rawResponse: {
             model: string;
             stop: true;
             content: string;

@@ -204,7 +213,7 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
         };
     }>;
     restoreGeneratedTexts(rawResponse: unknown): {
-
+        rawResponse: {
             model: string;
             stop: true;
             content: string;

@@ -263,8 +272,8 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
             totalTokens: number;
         };
     };
-    processTextGenerationResponse(
-
+    processTextGenerationResponse(rawResponse: LlamaCppTextGenerationResponse): {
+        rawResponse: {
             model: string;
             stop: true;
             content: string;

@@ -376,11 +385,12 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
         content: string;
     }>>>;
     extractTextDelta(delta: unknown): string;
+    asStructureGenerationModel<INPUT_PROMPT, LlamaCppPrompt>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, LlamaCppPrompt> | FlexibleStructureFromTextPromptTemplate<INPUT_PROMPT, unknown>): StructureFromTextStreamingModel<INPUT_PROMPT, unknown, TextStreamingModel<unknown, TextGenerationModelSettings>> | StructureFromTextStreamingModel<INPUT_PROMPT, LlamaCppPrompt, TextStreamingModel<LlamaCppPrompt, TextGenerationModelSettings>>;
+    withJsonOutput(): this;
+    private get promptTemplateProvider();
     withTextPrompt(): PromptTemplateTextStreamingModel<string, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
-
-
-     */
-    withTextPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, PromptTemplateTextStreamingModel<string, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>>;
+    withInstructionPrompt(): PromptTemplateTextStreamingModel<InstructionPrompt, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withChatPrompt(): PromptTemplateTextStreamingModel<ChatPrompt, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     /**
      * Maps the prompt for the full Llama.cpp prompt template (incl. image support).
      */
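`asStructureGenerationModel(...)` accepts either a plain structure prompt template or a flexible one that can adapt the model itself (the `adaptModel` branch in the implementation). A hedged sketch; `jsonStructurePrompt.text()` and the positional `generateStructure(model, schema, prompt)` call style are assumptions about this release line, not shown in this diff:

import { generateStructure, jsonStructurePrompt, llamacpp, zodSchema } from "modelfusion";
import { z } from "zod";

// A flexible template can call withJsonOutput()/withTextPrompt() on the model itself.
const structureModel = llamacpp
  .CompletionTextGenerator({ temperature: 0, maxGenerationTokens: 1024 })
  .asStructureGenerationModel(jsonStructurePrompt.text());

const sentiment = await generateStructure(
  structureModel,
  zodSchema(z.object({ sentiment: z.enum(["positive", "neutral", "negative"]) })),
  "The food was delicious."
);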
package/model-provider/llamacpp/LlamaCppCompletionModel.js

@@ -5,12 +5,15 @@ import { zodSchema } from "../../core/schema/ZodSchema.js";
 import { parseJSON } from "../../core/schema/parseJSON.js";
 import { validateTypes } from "../../core/schema/validateTypes.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
 import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
 import { LlamaCppApiConfiguration } from "./LlamaCppApiConfiguration.js";
 import { failedLlamaCppCallResponseHandler } from "./LlamaCppError.js";
+import { json } from "./LlamaCppGrammars.js";
+import { Text } from "./LlamaCppPrompt.js";
 import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
 export class LlamaCppCompletionModel extends AbstractModel {
     constructor(settings = {}) {

@@ -136,23 +139,23 @@ export class LlamaCppCompletionModel extends AbstractModel {
             schema: zodSchema(llamaCppTextGenerationResponseSchema),
         }));
     }
-    processTextGenerationResponse(
+    processTextGenerationResponse(rawResponse) {
         return {
-
+            rawResponse,
             textGenerationResults: [
                 {
-                    text:
-                    finishReason:
+                    text: rawResponse.content,
+                    finishReason: rawResponse.stopped_eos || rawResponse.stopped_word
                         ? "stop"
-                        :
+                        : rawResponse.stopped_limit
                             ? "length"
                             : "unknown",
                 },
             ],
             usage: {
-                promptTokens:
-                completionTokens:
-                totalTokens:
+                promptTokens: rawResponse.tokens_evaluated,
+                completionTokens: rawResponse.tokens_predicted,
+                totalTokens: rawResponse.tokens_evaluated + rawResponse.tokens_predicted,
             },
         };
     }

@@ -164,27 +167,34 @@ export class LlamaCppCompletionModel extends AbstractModel {
     extractTextDelta(delta) {
         return delta.content;
     }
+    asStructureGenerationModel(promptTemplate) {
+        return "adaptModel" in promptTemplate
+            ? new StructureFromTextStreamingModel({
+                model: promptTemplate.adaptModel(this),
+                template: promptTemplate,
+            })
+            : new StructureFromTextStreamingModel({
+                model: this,
+                template: promptTemplate,
+            });
+    }
+    withJsonOutput() {
+        // don't override the grammar if it's already set (to support more restrictive grammars)
+        return this.settings.grammar == null
+            ? this.withSettings({ grammar: json })
+            : this;
+    }
+    get promptTemplateProvider() {
+        return this.settings.promptTemplate ?? Text;
+    }
     withTextPrompt() {
-        return this.withPromptTemplate(
-            format(prompt) {
-                return { text: prompt };
-            },
-            stopSequences: [],
-        });
+        return this.withPromptTemplate(this.promptTemplateProvider.text());
     }
-
-
-
-
-        return
-            model: this.withTextPrompt().withSettings({
-                stopSequences: [
-                    ...(this.settings.stopSequences ?? []),
-                    ...promptTemplate.stopSequences,
-                ],
-            }),
-            promptTemplate,
-        });
+    withInstructionPrompt() {
+        return this.withPromptTemplate(this.promptTemplateProvider.instruction());
+    }
+    withChatPrompt() {
+        return this.withPromptTemplate(this.promptTemplateProvider.chat());
     }
     /**
      * Maps the prompt for the full Llama.cpp prompt template (incl. image support).
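With the provider wiring above, `withChatPrompt()` no longer needs an explicit template argument. A usage sketch; the positional `streamText(model, prompt)` call style is assumed:

import { llamacpp, streamText } from "modelfusion";

const model = llamacpp
  .CompletionTextGenerator({ maxGenerationTokens: 512 })
  .withChatPrompt(); // formatted by settings.promptTemplate ?? Text

const textStream = await streamText(model, {
  system: "You are a helpful assistant.",
  messages: [{ role: "user", content: "What is a GBNF grammar?" }],
});
for await (const chunk of textStream) {
  process.stdout.write(chunk);
}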
package/model-provider/llamacpp/LlamaCppFacade.cjs

@@ -1,6 +1,29 @@
 "use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.Tokenizer = exports.TextEmbedder = exports.
+exports.prompt = exports.grammar = exports.Tokenizer = exports.TextEmbedder = exports.CompletionTextGenerator = exports.Api = void 0;
 const LlamaCppApiConfiguration_js_1 = require("./LlamaCppApiConfiguration.cjs");
 const LlamaCppCompletionModel_js_1 = require("./LlamaCppCompletionModel.cjs");
 const LlamaCppTextEmbeddingModel_js_1 = require("./LlamaCppTextEmbeddingModel.cjs");

@@ -13,10 +36,10 @@ function Api(settings) {
     return new LlamaCppApiConfiguration_js_1.LlamaCppApiConfiguration(settings);
 }
 exports.Api = Api;
-function
+function CompletionTextGenerator(settings = {}) {
     return new LlamaCppCompletionModel_js_1.LlamaCppCompletionModel(settings);
 }
-exports.
+exports.CompletionTextGenerator = CompletionTextGenerator;
 function TextEmbedder(settings = {}) {
     return new LlamaCppTextEmbeddingModel_js_1.LlamaCppTextEmbeddingModel(settings);
 }

@@ -25,3 +48,8 @@ function Tokenizer(api = new LlamaCppApiConfiguration_js_1.LlamaCppApiConfigurat
     return new LlamaCppTokenizer_js_1.LlamaCppTokenizer(api);
 }
 exports.Tokenizer = Tokenizer;
+/**
+ * GBNF grammars. You can use them in the `grammar` option of the `TextGenerator` model.
+ */
+exports.grammar = __importStar(require("./LlamaCppGrammars.cjs"));
+exports.prompt = __importStar(require("./LlamaCppPrompt.cjs"));
package/model-provider/llamacpp/LlamaCppFacade.d.ts

@@ -9,6 +9,11 @@ import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
  * It calls the API at http://127.0.0.1:8080 by default.
  */
 export declare function Api(settings: PartialBaseUrlPartsApiConfigurationOptions): LlamaCppApiConfiguration;
-export declare function
+export declare function CompletionTextGenerator<CONTEXT_WINDOW_SIZE extends number>(settings?: LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>): LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE>;
 export declare function TextEmbedder(settings?: LlamaCppTextEmbeddingModelSettings): LlamaCppTextEmbeddingModel;
 export declare function Tokenizer(api?: ApiConfiguration): LlamaCppTokenizer;
+/**
+ * GBNF grammars. You can use them in the `grammar` option of the `TextGenerator` model.
+ */
+export * as grammar from "./LlamaCppGrammars.js";
+export * as prompt from "./LlamaCppPrompt.js";
package/model-provider/llamacpp/LlamaCppFacade.js

@@ -9,7 +9,7 @@ import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
 export function Api(settings) {
     return new LlamaCppApiConfiguration(settings);
 }
-export function
+export function CompletionTextGenerator(settings = {}) {
     return new LlamaCppCompletionModel(settings);
 }
 export function TextEmbedder(settings = {}) {

@@ -18,3 +18,8 @@ export function TextEmbedder(settings = {}) {
 export function Tokenizer(api = new LlamaCppApiConfiguration()) {
     return new LlamaCppTokenizer(api);
 }
+/**
+ * GBNF grammars. You can use them in the `grammar` option of the `TextGenerator` model.
+ */
+export * as grammar from "./LlamaCppGrammars.js";
+export * as prompt from "./LlamaCppPrompt.js";
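The facade now exposes the completion model as `CompletionTextGenerator` and re-exports the grammar and prompt template collections as the `grammar` and `prompt` namespaces. A usage sketch combining both; the positional `generateText(model, prompt)` call style is assumed:

import { generateText, llamacpp } from "modelfusion";

const story = await generateText(
  llamacpp
    .CompletionTextGenerator({
      grammar: llamacpp.grammar.json, // constrain output to valid JSON
      maxGenerationTokens: 512,
    })
    .withTextPrompt(),
  "Generate a JSON object that describes a book:"
);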
package/model-provider/llamacpp/LlamaCppGrammars.cjs

@@ -0,0 +1,84 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.list = exports.jsonArray = exports.json = void 0;
+/**
+ * GBNF grammar for JSON.
+ *
+ * @see https://github.com/ggerganov/llama.cpp/blob/master/grammars/json.gbnf
+ */
+exports.json = `
+root ::= object
+value ::= object | array | string | number | ("true" | "false" | "null") ws
+
+object ::=
+  "{" ws (
+    string ":" ws value
+    ("," ws string ":" ws value)*
+  )? "}" ws
+
+array ::=
+  "[" ws (
+    value
+    ("," ws value)*
+  )? "]" ws
+
+string ::=
+  "\\"" (
+    [^"\\\\] |
+    "\\\\" (["\\\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
+  )* "\\"" ws
+
+number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws
+
+# Optional space: by convention, applied in this grammar after literal chars when allowed
+ws ::= ([ \t\n] ws)?
+`;
+/**
+ * GBNF grammar for JSON array outputs. Restricts whitespace at the end of the array.
+ *
+ * @see https://github.com/ggerganov/llama.cpp/blob/master/grammars/json_arr.gbnf
+ */
+exports.jsonArray = `
+root ::= arr
+value ::= object | array | string | number | ("true" | "false" | "null") ws
+
+arr ::=
+  "[\n" ws (
+    value
+    (",\n" ws value)*
+  )? "]"
+
+object ::=
+  "{" ws (
+    string ":" ws value
+    ("," ws string ":" ws value)*
+  )? "}" ws
+
+array ::=
+  "[" ws (
+    value
+    ("," ws value)*
+  )? "]" ws
+
+string ::=
+  "\\"" (
+    [^"\\\\] |
+    "\\\\" (["\\\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
+  )* "\\"" ws
+
+number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws
+
+# Optional space: by convention, applied in this grammar after literal chars when allowed
+ws ::= ([ \t\n] ws)?
+`;
+/**
+ * GBNF grammar for list outputs. List items are separated by newlines and start with `- `.
+ *
+ * @see https://github.com/ggerganov/llama.cpp/blob/master/grammars/list.gbnf
+ */
+exports.list = `
+root ::= item+
+
+# Excludes various line break characters
+item ::= "- " [^\r\n\x0b\x0c\x85\u2028\u2029]+ "\n"
+`;
package/model-provider/llamacpp/LlamaCppGrammars.d.ts

@@ -0,0 +1,18 @@
+/**
+ * GBNF grammar for JSON.
+ *
+ * @see https://github.com/ggerganov/llama.cpp/blob/master/grammars/json.gbnf
+ */
+export declare const json: string;
+/**
+ * GBNF grammar for JSON array outputs. Restricts whitespace at the end of the array.
+ *
+ * @see https://github.com/ggerganov/llama.cpp/blob/master/grammars/json_arr.gbnf
+ */
+export declare const jsonArray: string;
+/**
+ * GBNF grammar for list outputs. List items are separated by newlines and start with `- `.
+ *
+ * @see https://github.com/ggerganov/llama.cpp/blob/master/grammars/list.gbnf
+ */
+export declare const list: string;
package/model-provider/llamacpp/LlamaCppGrammars.js

@@ -0,0 +1,81 @@
+/**
+ * GBNF grammar for JSON.
+ *
+ * @see https://github.com/ggerganov/llama.cpp/blob/master/grammars/json.gbnf
+ */
+export const json = `
+root ::= object
+value ::= object | array | string | number | ("true" | "false" | "null") ws
+
+object ::=
+  "{" ws (
+    string ":" ws value
+    ("," ws string ":" ws value)*
+  )? "}" ws
+
+array ::=
+  "[" ws (
+    value
+    ("," ws value)*
+  )? "]" ws
+
+string ::=
+  "\\"" (
+    [^"\\\\] |
+    "\\\\" (["\\\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
+  )* "\\"" ws
+
+number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws
+
+# Optional space: by convention, applied in this grammar after literal chars when allowed
+ws ::= ([ \t\n] ws)?
+`;
+/**
+ * GBNF grammar for JSON array outputs. Restricts whitespace at the end of the array.
+ *
+ * @see https://github.com/ggerganov/llama.cpp/blob/master/grammars/json_arr.gbnf
+ */
+export const jsonArray = `
+root ::= arr
+value ::= object | array | string | number | ("true" | "false" | "null") ws
+
+arr ::=
+  "[\n" ws (
+    value
+    (",\n" ws value)*
+  )? "]"
+
+object ::=
+  "{" ws (
+    string ":" ws value
+    ("," ws string ":" ws value)*
+  )? "}" ws
+
+array ::=
+  "[" ws (
+    value
+    ("," ws value)*
+  )? "]" ws
+
+string ::=
+  "\\"" (
+    [^"\\\\] |
+    "\\\\" (["\\\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
+  )* "\\"" ws
+
+number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws
+
+# Optional space: by convention, applied in this grammar after literal chars when allowed
+ws ::= ([ \t\n] ws)?
+`;
+/**
+ * GBNF grammar for list outputs. List items are separated by newlines and start with `- `.
+ *
+ * @see https://github.com/ggerganov/llama.cpp/blob/master/grammars/list.gbnf
+ */
+export const list = `
+root ::= item+
+
+# Excludes various line break characters
+item ::= "- " [^\r\n\x0b\x0c\x85\u2028\u2029]+ "\n"
+`;
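The `list` grammar above forces every emitted line to be a `- ` bullet item, which makes line-by-line parsing trivial. A closing sketch; the positional `generateText(model, prompt)` call style is assumed:

import { generateText, llamacpp } from "modelfusion";

const text = await generateText(
  llamacpp
    .CompletionTextGenerator({
      grammar: llamacpp.grammar.list,
      maxGenerationTokens: 256,
    })
    .withTextPrompt(),
  "List five ingredients for pancakes:"
);
// Every line matches the grammar's item rule: "- " followed by the item text.
const items = text
  .split("\n")
  .filter((line) => line.startsWith("- "))
  .map((line) => line.slice(2));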